xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 955b5b6c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* Exposed address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
        return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Exposed address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
        return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Find the channel with destination CID @cid on @conn, or NULL.
 * Caller must hold conn->chan_lock (see l2cap_get_chan_by_dcid()).
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
101 
/* Find the channel with source CID @cid on @conn, or NULL.
 * Caller must hold conn->chan_lock (see l2cap_get_chan_by_scid()).
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	mutex_lock(&conn->chan_lock);
123 	c = __l2cap_get_chan_by_scid(conn, cid);
124 	if (c) {
125 		/* Only lock if chan reference is not 0 */
126 		c = l2cap_chan_hold_unless_zero(c);
127 		if (c)
128 			l2cap_chan_lock(c);
129 	}
130 	mutex_unlock(&conn->chan_lock);
131 
132 	return c;
133 }
134 
135 /* Find channel with given DCID.
136  * Returns a reference locked channel.
137  */
138 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
139 						 u16 cid)
140 {
141 	struct l2cap_chan *c;
142 
143 	mutex_lock(&conn->chan_lock);
144 	c = __l2cap_get_chan_by_dcid(conn, cid);
145 	if (c) {
146 		/* Only lock if chan reference is not 0 */
147 		c = l2cap_chan_hold_unless_zero(c);
148 		if (c)
149 			l2cap_chan_lock(c);
150 	}
151 	mutex_unlock(&conn->chan_lock);
152 
153 	return c;
154 }
155 
/* Find the channel whose pending signalling identifier matches @ident,
 * or NULL. Caller must hold conn->chan_lock.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
167 
168 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
169 						      u8 src_type)
170 {
171 	struct l2cap_chan *c;
172 
173 	list_for_each_entry(c, &chan_list, global_l) {
174 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
175 			continue;
176 
177 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
178 			continue;
179 
180 		if (c->sport == psm && !bacmp(&c->src, src))
181 			return c;
182 	}
183 	return NULL;
184 }
185 
/* Bind @chan to source PSM @psm on address @src, or auto-allocate a
 * dynamic PSM when @psm is 0. Returns 0 on success, -EADDRINUSE if the
 * requested PSM is already bound on @src, or -EINVAL when no dynamic
 * PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* Step by 2 so every candidate keeps the parity
			 * of L2CAP_PSM_DYN_START (valid BR/EDR PSMs are
			 * odd-valued).
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
230 
231 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
232 {
233 	write_lock(&chan_list_lock);
234 
235 	/* Override the defaults (which are for conn-oriented) */
236 	chan->omtu = L2CAP_DEFAULT_MTU;
237 	chan->chan_type = L2CAP_CHAN_FIXED;
238 
239 	chan->scid = scid;
240 
241 	write_unlock(&chan_list_lock);
242 
243 	return 0;
244 }
245 
246 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
247 {
248 	u16 cid, dyn_end;
249 
250 	if (conn->hcon->type == LE_LINK)
251 		dyn_end = L2CAP_CID_LE_DYN_END;
252 	else
253 		dyn_end = L2CAP_CID_DYN_END;
254 
255 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
256 		if (!__l2cap_get_chan_by_scid(conn, cid))
257 			return cid;
258 	}
259 
260 	return 0;
261 }
262 
/* Move @chan to @state and notify the channel ops (no error code) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
271 
/* Move @chan to @state and notify the channel ops with error @err */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
278 
/* Report error @err on @chan without changing its state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
283 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the two are mutually exclusive) or no retransmission
 * timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
292 
/* Arm the ERTM monitor timer, cancelling the retransmission timer
 * first (only one of the two runs at a time).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
301 
302 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
303 					       u16 seq)
304 {
305 	struct sk_buff *skb;
306 
307 	skb_queue_walk(head, skb) {
308 		if (bt_cb(skb)->l2cap.txseq == seq)
309 			return skb;
310 	}
311 
312 	return NULL;
313 }
314 
315 /* ---- L2CAP sequence number lists ---- */
316 
317 /* For ERTM, ordered lists of sequence numbers must be tracked for
318  * SREJ requests that are received and for frames that are to be
319  * retransmitted. These seq_list functions implement a singly-linked
320  * list in an array, where membership in the list can also be checked
321  * in constant time. Items can also be added to the tail of the list
322  * and removed from the head in constant time, without further memory
323  * allocs or frees.
324  */
325 
326 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
327 {
328 	size_t alloc_size, i;
329 
330 	/* Allocated size is a power of 2 to map sequence numbers
331 	 * (which may be up to 14 bits) in to a smaller array that is
332 	 * sized for the negotiated ERTM transmit windows.
333 	 */
334 	alloc_size = roundup_pow_of_two(size);
335 
336 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
337 	if (!seq_list->list)
338 		return -ENOMEM;
339 
340 	seq_list->mask = alloc_size - 1;
341 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
342 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 	for (i = 0; i < alloc_size; i++)
344 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
345 
346 	return 0;
347 }
348 
/* Release the array backing @seq_list (kfree(NULL) is a no-op) */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
353 
/* True if @seq is currently a member of @seq_list */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
360 
/* Remove and return the sequence number at the head of @seq_list.
 * Callers must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link, then clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset the list to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
376 
377 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
378 {
379 	u16 i;
380 
381 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
382 		return;
383 
384 	for (i = 0; i <= seq_list->mask; i++)
385 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
386 
387 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 }
390 
/* Append @seq to the tail of @seq_list; duplicates are ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Non-CLEAR slot means @seq is already a member */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
408 
/* Delayed work run when a channel timer (armed via __set_chan_timer())
 * expires: close the channel with an error derived from its state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Channel may already have been detached from its connection */
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
444 
/* Allocate and initialize a new channel, add it to the global channel
 * list and return it with an initial reference, or NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
481 
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
494 
/* Take an additional reference on @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
501 
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505 
506 	if (!kref_get_unless_zero(&c->kref))
507 		return NULL;
508 
509 	return c;
510 }
511 
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
/* Reset @chan to the default ERTM/security parameters used before
 * configuration negotiates anything else.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Start from a clean conf_state; CONF_NOT_COMPLETE is cleared
	 * again in l2cap_chan_ready().
	 */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
/* Initialize LE credit-based flow control state on @chan, starting
 * with @tx_credits for the transmit direction.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* Reset SDU reassembly state */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
554 
/* Initialize enhanced credit based flow control state on @chan,
 * enforcing the spec-minimum MPS on top of the LE flow control setup.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute credits for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
565 
/* Attach @chan to @conn: pick CIDs/MTU according to the channel type,
 * take a channel reference and add it to the connection's channel
 * list. Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
617 
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
624 
/* Detach @chan from its connection and tear down mode-specific state,
 * reporting @err to the channel's owner. Undoes the reference taken in
 * __l2cap_chan_add().
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below is only initialized once
	 * configuration completed.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
682 
/* Invoke @func(@chan, @data) for every channel on @conn whose pending
 * signalling identifier equals @id. Uses the _safe iterator so @func
 * may remove the channel from the list. Caller holds conn->chan_lock.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
693 
/* Invoke @func(@chan, @data) for every channel on @conn.
 * Caller must hold conn->chan_lock (see l2cap_chan_list()).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
703 
704 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
705 		     void *data)
706 {
707 	if (!conn)
708 		return;
709 
710 	mutex_lock(&conn->chan_lock);
711 	__l2cap_chan_list(conn, func, data);
712 	mutex_unlock(&conn->chan_lock);
713 }
714 
715 EXPORT_SYMBOL_GPL(l2cap_chan_list);
716 
/* Deferred work (conn->id_addr_timer): propagate the HCI connection's
 * current destination address and type to every channel on the
 * connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
735 
/* Reject a pending LE credit based connection request on @chan by
 * sending an LE connection response with an error result.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* The response's dcid is our scid: fields are named from the
	 * remote side's point of view.
	 */
	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
758 
/* Reject a pending enhanced credit based connection on @chan: move it
 * to BT_DISCONN and emit the deferred connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
765 
766 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
767 {
768 	struct l2cap_conn *conn = chan->conn;
769 	struct l2cap_conn_rsp rsp;
770 	u16 result;
771 
772 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 		result = L2CAP_CR_SEC_BLOCK;
774 	else
775 		result = L2CAP_CR_BAD_PSM;
776 
777 	l2cap_state_change(chan, BT_DISCONN);
778 
779 	rsp.scid   = cpu_to_le16(chan->dcid);
780 	rsp.dcid   = cpu_to_le16(chan->scid);
781 	rsp.result = cpu_to_le16(result);
782 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 
784 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
785 }
786 
/* Close @chan with @reason, choosing the shutdown path appropriate to
 * its current state (graceful disconnect, connection rejection, or
 * immediate deletion). Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established conn-oriented channels are disconnected
		 * gracefully; everything else is deleted right away.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending: send a reject
		 * matching the transport and channel mode.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
837 
/* Map the channel type and security level of @chan to an HCI
 * authentication requirement (HCI_AT_*). May upgrade sec_level from
 * LOW to SDP for SDP/3DSP PSMs as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the defaults below */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
889 
890 /* Service level security */
891 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
892 {
893 	struct l2cap_conn *conn = chan->conn;
894 	__u8 auth_type;
895 
896 	if (conn->hcon->type == LE_LINK)
897 		return smp_conn_security(conn->hcon, chan->sec_level);
898 
899 	auth_type = l2cap_get_auth_type(chan);
900 
901 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
902 				 initiator);
903 }
904 
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
926 
/* Build and transmit a signalling command on @conn. Silently drops
 * the command if the skb cannot be allocated.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is sent at maximum priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
951 
/* Hand a fully built L2CAP PDU for @chan to the HCI layer, selecting
 * the appropriate ACL flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
974 
/* Decode a 16-bit enhanced control field @enh into @control */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		/* I-frame-only fields are zeroed for S-frames */
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		/* S-frame-only fields are zeroed for I-frames */
		control->poll = 0;
		control->super = 0;
	}
}
998 
/* Decode a 32-bit extended control field @ext into @control */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		/* I-frame-only fields are zeroed for S-frames */
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		/* S-frame-only fields are zeroed for I-frames */
		control->poll = 0;
		control->super = 0;
	}
}
1022 
1023 static inline void __unpack_control(struct l2cap_chan *chan,
1024 				    struct sk_buff *skb)
1025 {
1026 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 		__unpack_extended_control(get_unaligned_le32(skb->data),
1028 					  &bt_cb(skb)->l2cap);
1029 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1030 	} else {
1031 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1032 					  &bt_cb(skb)->l2cap);
1033 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1034 	}
1035 }
1036 
1037 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1038 {
1039 	u32 packed;
1040 
1041 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1043 
1044 	if (control->sframe) {
1045 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1046 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1047 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1048 	} else {
1049 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1050 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1051 	}
1052 
1053 	return packed;
1054 }
1055 
1056 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1057 {
1058 	u16 packed;
1059 
1060 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1061 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1062 
1063 	if (control->sframe) {
1064 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1065 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1066 		packed |= L2CAP_CTRL_FRAME_TYPE;
1067 	} else {
1068 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1069 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1070 	}
1071 
1072 	return packed;
1073 }
1074 
/* Write @control into @skb directly after the basic L2CAP header,
 * using the control-field width negotiated on @chan.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1087 
1088 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1089 {
1090 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1091 		return L2CAP_EXT_HDR_SIZE;
1092 	else
1093 		return L2CAP_ENH_HDR_SIZE;
1094 }
1095 
/* Build an S-frame PDU for @chan carrying the pre-packed control
 * field @control, appending an FCS when CRC16 is in use. Returns the
 * skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1128 
/* Transmit one S-frame built from @control, updating the channel's
 * ERTM ack/busy bookkeeping as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the last receiver state we reported was busy. */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acknowledges up to reqseq, so the ack
	 * timer can be stopped; SREJ does not move the ack point.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1166 
1167 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1168 {
1169 	struct l2cap_ctrl control;
1170 
1171 	BT_DBG("chan %p, poll %d", chan, poll);
1172 
1173 	memset(&control, 0, sizeof(control));
1174 	control.sframe = 1;
1175 	control.poll = poll;
1176 
1177 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1178 		control.super = L2CAP_SUPER_RNR;
1179 	else
1180 		control.super = L2CAP_SUPER_RR;
1181 
1182 	control.reqseq = chan->buffer_seq;
1183 	l2cap_send_sframe(chan, &control);
1184 }
1185 
1186 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1187 {
1188 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1189 		return true;
1190 
1191 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1192 }
1193 
1194 void l2cap_send_conn_req(struct l2cap_chan *chan)
1195 {
1196 	struct l2cap_conn *conn = chan->conn;
1197 	struct l2cap_conn_req req;
1198 
1199 	req.scid = cpu_to_le16(chan->scid);
1200 	req.psm  = chan->psm;
1201 
1202 	chan->ident = l2cap_get_ident(conn);
1203 
1204 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1205 
1206 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1207 }
1208 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without credits;
		 * start suspended until the peer grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1234 
/* Send an LE credit-based connection request for @chan; guarded by
 * FLAG_LE_CONN_REQ_SENT so it is sent at most once per channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the connection MTU if none was configured. */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1260 
/* Scratch state used to batch deferred channels into a single ECRED
 * connection request (see l2cap_ecred_connect()).
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* up to 5 source CIDs in one request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID; only same-PID chans join */
	int count;			/* number of scid[] entries filled */
};
1270 
1271 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1272 {
1273 	struct l2cap_ecred_conn_data *conn = data;
1274 	struct pid *pid;
1275 
1276 	if (chan == conn->chan)
1277 		return;
1278 
1279 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1280 		return;
1281 
1282 	pid = chan->ops->get_peer_pid(chan);
1283 
1284 	/* Only add deferred channels with the same PID/PSM */
1285 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1286 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1287 		return;
1288 
1289 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1290 		return;
1291 
1292 	l2cap_ecred_init(chan, 0);
1293 
1294 	/* Set the same ident so we can match on the rsp */
1295 	chan->ident = conn->chan->ident;
1296 
1297 	/* Include all channels deferred */
1298 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1299 
1300 	conn->count++;
1301 }
1302 
/* Send an enhanced credit-based (ECRED) connection request for @chan,
 * batching other deferred channels with the same PID/PSM into the same
 * request via l2cap_ecred_defer_connect().
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by an initiating chan. */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect additional deferred channels into data.pdu.scid[]. */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many CIDs were collected. */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1335 
1336 static void l2cap_le_start(struct l2cap_chan *chan)
1337 {
1338 	struct l2cap_conn *conn = chan->conn;
1339 
1340 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1341 		return;
1342 
1343 	if (!chan->psm) {
1344 		l2cap_chan_ready(chan);
1345 		return;
1346 	}
1347 
1348 	if (chan->state == BT_CONNECT) {
1349 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1350 			l2cap_ecred_connect(chan);
1351 		else
1352 			l2cap_le_connect(chan);
1353 	}
1354 }
1355 
1356 static void l2cap_start_connection(struct l2cap_chan *chan)
1357 {
1358 	if (chan->conn->hcon->type == LE_LINK) {
1359 		l2cap_le_start(chan);
1360 	} else {
1361 		l2cap_send_conn_req(chan);
1362 	}
1363 }
1364 
/* Start the information request procedure (feature-mask query) on
 * @conn, at most once, and arm the info timeout.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* If no response arrives, l2cap_info_timeout() unblocks channels. */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1382 
1383 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1384 {
1385 	/* The minimum encryption key size needs to be enforced by the
1386 	 * host stack before establishing any L2CAP connections. The
1387 	 * specification in theory allows a minimum of 1, but to align
1388 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1389 	 *
1390 	 * This check might also be called for unencrypted connections
1391 	 * that have no key size requirements. Ensure that the link is
1392 	 * actually encrypted before enforcing a key size.
1393 	 */
1394 	int min_key_size = hcon->hdev->min_enc_key_size;
1395 
1396 	/* On FIPS security level, key size must be 16 bytes */
1397 	if (hcon->sec_level == BT_SECURITY_FIPS)
1398 		min_key_size = 16;
1399 
1400 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1401 		hcon->enc_key_size >= min_key_size);
1402 }
1403 
/* Advance the connect procedure for @chan as far as the current link
 * state allows (info exchange, security check, then connect request).
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* BR/EDR: the feature-mask exchange must complete (or time out)
	 * before channels may proceed.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Refuse weakly-encrypted links: arm the disconnect timer instead. */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1430 
1431 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1432 {
1433 	u32 local_feat_mask = l2cap_feat_mask;
1434 	if (!disable_ertm)
1435 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1436 
1437 	switch (mode) {
1438 	case L2CAP_MODE_ERTM:
1439 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1440 	case L2CAP_MODE_STREAMING:
1441 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1442 	default:
1443 		return 0x00;
1444 	}
1445 }
1446 
/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * @err recorded as the channel error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers; no further frames will be exchanged. */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1468 
1469 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (typically once the info exchange has
 * finished) and move each forward: send pending connect requests
 * (BT_CONNECT) or answer incoming ones (BT_CONNECT2).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Non-connection-oriented channels are ready immediately. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the peer
			 * does not support (CONF_STATE2_DEVICE set).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first. */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only enter configuration after a successful
			 * response, and send the config request only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1549 
/* LE link became ready: trigger outgoing pairing if needed and request
 * a connection parameter update when the interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1582 
/* The underlying link is usable: start or ready every channel on
 * @conn, then release any rx frames queued while it was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR starts the feature-mask exchange first. */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange. */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn was fully ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1618 
1619 /* Notify sockets that we cannot guaranty reliability anymore */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1621 {
1622 	struct l2cap_chan *chan;
1623 
1624 	BT_DBG("conn %p", conn);
1625 
1626 	mutex_lock(&conn->chan_lock);
1627 
1628 	list_for_each_entry(chan, &conn->chan_l, list) {
1629 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 			l2cap_chan_set_err(chan, err);
1631 	}
1632 
1633 	mutex_unlock(&conn->chan_lock);
1634 }
1635 
/* Info request timed out: mark the exchange as done anyway so waiting
 * channels can proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1646 
1647 /*
1648  * l2cap_user
1649  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650  * callback is called during registration. The ->remove callback is called
1651  * during unregistration.
1652  * An l2cap_user object can either be explicitly unregistered or when the
1653  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655  * External modules must own a reference to the l2cap_conn object if they intend
1656  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657  * any time if they don't.
1658  */
1659 
/* Register @user on @conn, calling user->probe() under the hci_dev lock.
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection is already being torn down, or probe()'s error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered. */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1696 EXPORT_SYMBOL(l2cap_register_user);
1697 
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1699 {
1700 	struct hci_dev *hdev = conn->hcon->hdev;
1701 
1702 	hci_dev_lock(hdev);
1703 
1704 	if (list_empty(&user->list))
1705 		goto out_unlock;
1706 
1707 	list_del_init(&user->list);
1708 	user->remove(conn, user);
1709 
1710 out_unlock:
1711 	hci_dev_unlock(hdev);
1712 }
1713 EXPORT_SYMBOL(l2cap_unregister_user);
1714 
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1716 {
1717 	struct l2cap_user *user;
1718 
1719 	while (!list_empty(&conn->users)) {
1720 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 		list_del_init(&user->list);
1722 		user->remove(conn, user);
1723 	}
1724 }
1725 
/* Tear down @hcon's L2CAP state: cancel pending rx work and timers,
 * unregister users, close every channel with @err, and drop the conn
 * reference. Relies on the parent hci_conn being locked by the caller
 * (see the comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it outlives l2cap_chan_del() until
		 * ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* conn->hchan == NULL marks the conn as dead for late users. */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1780 
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1788 
/* Take a reference on @conn; paired with l2cap_conn_put(). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1794 EXPORT_SYMBOL(l2cap_conn_get);
1795 
/* Drop a reference on @conn; l2cap_conn_free() runs when it hits zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1800 EXPORT_SYMBOL(l2cap_conn_put);
1801 
1802 /* ---- Socket interface ---- */
1803 
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact src/dst match wins immediately,
 * otherwise the best wildcard (BDADDR_ANY) candidate is used. The
 * returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero(); NULL if nothing matched.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* The channel's address type must match the link type. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed. */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1857 
/* ERTM monitor timer expired: feed a monitor-timeout event into the TX
 * state machine, then drop the channel reference held for this work.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do. */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1878 
/* ERTM retransmission timer expired: feed a retransmission-timeout
 * event into the TX state machine and drop the channel reference.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do. */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1898 
/* Streaming-mode TX: append @skbs to the tx queue and transmit every
 * queued I-frame immediately; no retransmission state is kept, only
 * the TX sequence number advances.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* reqseq is unused in this mode and stays 0. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1934 
/* ERTM TX: transmit queued I-frames while the remote TX window has
 * room and the TX state machine allows it. Returns the number of
 * frames sent, 0 when blocked, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges up to buffer_seq. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original skb stays queued
		 * for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2001 
/* Retransmit the frames queued on chan->retrans_list, enforcing the
 * max_tx retry limit and refreshing each frame's control field and FCS
 * in place before sending.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack info carried by the retransmitted frame. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2079 
/* Queue the single frame @control->reqseq for retransmission and
 * resend immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2088 
/* Queue every unacked frame from @control->reqseq up to (but not
 * including) tx_send_head for retransmission, then resend them.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll requires the F-bit to be set on the response. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acknowledged. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2122 
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on outgoing I-frames and only emit an
 * explicit RR once about 3/4 of the ack window is outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise delay the ack until the ack timer fires. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2172 
/* Copy @len bytes from the iovec in @msg into @skb: @count bytes into
 * the linear area, the remainder into conn->mtu-sized fragments chained
 * on frag_list. Returns bytes copied or a negative error; on error,
 * fragments already linked into @skb are released with it by the
 * caller's kfree_skb().
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment carries at most conn->mtu bytes. */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the parent skb's totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2216 
2217 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2218 						 struct msghdr *msg, size_t len)
2219 {
2220 	struct l2cap_conn *conn = chan->conn;
2221 	struct sk_buff *skb;
2222 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2223 	struct l2cap_hdr *lh;
2224 
2225 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2226 	       __le16_to_cpu(chan->psm), len);
2227 
2228 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2229 
2230 	skb = chan->ops->alloc_skb(chan, hlen, count,
2231 				   msg->msg_flags & MSG_DONTWAIT);
2232 	if (IS_ERR(skb))
2233 		return skb;
2234 
2235 	/* Create L2CAP header */
2236 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2237 	lh->cid = cpu_to_le16(chan->dcid);
2238 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2239 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2240 
2241 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2242 	if (unlikely(err < 0)) {
2243 		kfree_skb(skb);
2244 		return ERR_PTR(err);
2245 	}
2246 	return skb;
2247 }
2248 
2249 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2250 					      struct msghdr *msg, size_t len)
2251 {
2252 	struct l2cap_conn *conn = chan->conn;
2253 	struct sk_buff *skb;
2254 	int err, count;
2255 	struct l2cap_hdr *lh;
2256 
2257 	BT_DBG("chan %p len %zu", chan, len);
2258 
2259 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2260 
2261 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2262 				   msg->msg_flags & MSG_DONTWAIT);
2263 	if (IS_ERR(skb))
2264 		return skb;
2265 
2266 	/* Create L2CAP header */
2267 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2268 	lh->cid = cpu_to_le16(chan->dcid);
2269 	lh->len = cpu_to_le16(len);
2270 
2271 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 	if (unlikely(err < 0)) {
2273 		kfree_skb(skb);
2274 		return ERR_PTR(err);
2275 	}
2276 	return skb;
2277 }
2278 
/* Build one ERTM/streaming I-frame PDU.  @sdulen is non-zero only for
 * the first (SAR start) PDU of a segmented SDU, in which case the
 * 2-byte SDU length field is included after the control field.  The
 * control field itself is zeroed here and filled in at transmit time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Overhead: basic header + (enhanced or extended) control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve room for the trailing FCS when in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header; lh->len covers everything after it,
	 * including control/SDULEN/FCS overhead.
	 */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2332 
/* Segment an outgoing SDU from @msg into ERTM/streaming I-frame PDUs
 * and queue them on @seg_queue.  Each PDU carries the appropriate SAR
 * marking (unsegmented, start, continue, end).  Returns 0 on success
 * or a negative errno, in which case @seg_queue has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a multi-PDU SDU carries the total length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the start PDU carries sdu_len */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			/* Next iteration emits the final (shorter) PDU */
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2398 
2399 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2400 						   struct msghdr *msg,
2401 						   size_t len, u16 sdulen)
2402 {
2403 	struct l2cap_conn *conn = chan->conn;
2404 	struct sk_buff *skb;
2405 	int err, count, hlen;
2406 	struct l2cap_hdr *lh;
2407 
2408 	BT_DBG("chan %p len %zu", chan, len);
2409 
2410 	if (!conn)
2411 		return ERR_PTR(-ENOTCONN);
2412 
2413 	hlen = L2CAP_HDR_SIZE;
2414 
2415 	if (sdulen)
2416 		hlen += L2CAP_SDULEN_SIZE;
2417 
2418 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2419 
2420 	skb = chan->ops->alloc_skb(chan, hlen, count,
2421 				   msg->msg_flags & MSG_DONTWAIT);
2422 	if (IS_ERR(skb))
2423 		return skb;
2424 
2425 	/* Create L2CAP header */
2426 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2427 	lh->cid = cpu_to_le16(chan->dcid);
2428 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2429 
2430 	if (sdulen)
2431 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2432 
2433 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2434 	if (unlikely(err < 0)) {
2435 		kfree_skb(skb);
2436 		return ERR_PTR(err);
2437 	}
2438 
2439 	return skb;
2440 }
2441 
/* Segment an outgoing SDU from @msg into LE flow-control PDUs queued
 * on @seg_queue.  The first PDU carries the total SDU length and so
 * has L2CAP_SDULEN_SIZE less payload room than the following ones.
 * Returns 0 on success or a negative errno (queue purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* First PDU payload budget: peer MPS minus the SDULEN field */
	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDULEN field, so they
			 * regain those two bytes of payload room.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2477 
2478 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2479 {
2480 	int sent = 0;
2481 
2482 	BT_DBG("chan %p", chan);
2483 
2484 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2485 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2486 		chan->tx_credits--;
2487 		sent++;
2488 	}
2489 
2490 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2491 	       skb_queue_len(&chan->tx_q));
2492 }
2493 
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode.  Returns the number of bytes accepted (== len) or a negative
 * errno.  For credit-based modes the data may be queued rather than
 * transmitted immediately; the channel is suspended via ops->suspend
 * when credits run out.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down while segmenting;
		 * discard the segments (purging twice is harmless if
		 * segmentation already purged on error).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop the caller until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2597 
2598 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2599 {
2600 	struct l2cap_ctrl control;
2601 	u16 seq;
2602 
2603 	BT_DBG("chan %p, txseq %u", chan, txseq);
2604 
2605 	memset(&control, 0, sizeof(control));
2606 	control.sframe = 1;
2607 	control.super = L2CAP_SUPER_SREJ;
2608 
2609 	for (seq = chan->expected_tx_seq; seq != txseq;
2610 	     seq = __next_seq(chan, seq)) {
2611 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2612 			control.reqseq = seq;
2613 			l2cap_send_sframe(chan, &control);
2614 			l2cap_seq_list_append(&chan->srej_list, seq);
2615 		}
2616 	}
2617 
2618 	chan->expected_tx_seq = __next_seq(chan, txseq);
2619 }
2620 
2621 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2622 {
2623 	struct l2cap_ctrl control;
2624 
2625 	BT_DBG("chan %p", chan);
2626 
2627 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2628 		return;
2629 
2630 	memset(&control, 0, sizeof(control));
2631 	control.sframe = 1;
2632 	control.super = L2CAP_SUPER_SREJ;
2633 	control.reqseq = chan->srej_list.tail;
2634 	l2cap_send_sframe(chan, &control);
2635 }
2636 
/* Re-send SREJs for every outstanding missing frame except @txseq.
 * Each seq is popped from srej_list, re-requested, and appended back,
 * rotating the list; @initial_head bounds the walk to one full pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* @txseq just arrived, so it is dropped from the list
		 * rather than re-requested; CLEAR means list exhausted.
		 */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2662 
/* Process an incoming acknowledgment (ReqSeq): release every TX-queued
 * frame with a sequence number before @reqseq and stop the
 * retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already be gone (e.g. previously acked) */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2694 
2695 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2696 {
2697 	BT_DBG("chan %p", chan);
2698 
2699 	chan->expected_tx_seq = chan->buffer_seq;
2700 	l2cap_seq_list_clear(&chan->srej_list);
2701 	skb_queue_purge(&chan->srej_q);
2702 	chan->rx_state = L2CAP_RX_STATE_RECV;
2703 }
2704 
/* Transmitter state machine, XMIT state: normal operation where new
 * data is queued and sent immediately.  Poll events and retransmission
 * timeouts transition the channel into WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where transmission should resume from */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer we were busy (RNR); poll it
			 * with RR so it learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and wait for its F-bit response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2776 
2777 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2778 				  struct l2cap_ctrl *control,
2779 				  struct sk_buff_head *skbs, u8 event)
2780 {
2781 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2782 	       event);
2783 
2784 	switch (event) {
2785 	case L2CAP_EV_DATA_REQUEST:
2786 		if (chan->tx_send_head == NULL)
2787 			chan->tx_send_head = skb_peek(skbs);
2788 		/* Queue data, but don't send. */
2789 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2790 		break;
2791 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2792 		BT_DBG("Enter LOCAL_BUSY");
2793 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2794 
2795 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2796 			/* The SREJ_SENT state must be aborted if we are to
2797 			 * enter the LOCAL_BUSY state.
2798 			 */
2799 			l2cap_abort_rx_srej_sent(chan);
2800 		}
2801 
2802 		l2cap_send_ack(chan);
2803 
2804 		break;
2805 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2806 		BT_DBG("Exit LOCAL_BUSY");
2807 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2808 
2809 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2810 			struct l2cap_ctrl local_control;
2811 			memset(&local_control, 0, sizeof(local_control));
2812 			local_control.sframe = 1;
2813 			local_control.super = L2CAP_SUPER_RR;
2814 			local_control.poll = 1;
2815 			local_control.reqseq = chan->buffer_seq;
2816 			l2cap_send_sframe(chan, &local_control);
2817 
2818 			chan->retry_count = 1;
2819 			__set_monitor_timer(chan);
2820 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2821 		}
2822 		break;
2823 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2824 		l2cap_process_reqseq(chan, control->reqseq);
2825 		fallthrough;
2826 
2827 	case L2CAP_EV_RECV_FBIT:
2828 		if (control && control->final) {
2829 			__clear_monitor_timer(chan);
2830 			if (chan->unacked_frames > 0)
2831 				__set_retrans_timer(chan);
2832 			chan->retry_count = 0;
2833 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2834 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2835 		}
2836 		break;
2837 	case L2CAP_EV_EXPLICIT_POLL:
2838 		/* Ignore */
2839 		break;
2840 	case L2CAP_EV_MONITOR_TO:
2841 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2842 			l2cap_send_rr_or_rnr(chan, 1);
2843 			__set_monitor_timer(chan);
2844 			chan->retry_count++;
2845 		} else {
2846 			l2cap_send_disconn_req(chan, ECONNABORTED);
2847 		}
2848 		break;
2849 	default:
2850 		break;
2851 	}
2852 }
2853 
2854 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2855 		     struct sk_buff_head *skbs, u8 event)
2856 {
2857 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2858 	       chan, control, skbs, event, chan->tx_state);
2859 
2860 	switch (chan->tx_state) {
2861 	case L2CAP_TX_STATE_XMIT:
2862 		l2cap_tx_state_xmit(chan, control, skbs, event);
2863 		break;
2864 	case L2CAP_TX_STATE_WAIT_F:
2865 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2866 		break;
2867 	default:
2868 		/* Ignore event */
2869 		break;
2870 	}
2871 }
2872 
/* Feed a received frame's ReqSeq and F-bit into the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2879 
/* Feed only a received frame's F-bit (no ReqSeq processing) into the
 * TX state machine.
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2886 
2887 /* Copy frame to all raw sockets on that connection */
2888 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2889 {
2890 	struct sk_buff *nskb;
2891 	struct l2cap_chan *chan;
2892 
2893 	BT_DBG("conn %p", conn);
2894 
2895 	mutex_lock(&conn->chan_lock);
2896 
2897 	list_for_each_entry(chan, &conn->chan_l, list) {
2898 		if (chan->chan_type != L2CAP_CHAN_RAW)
2899 			continue;
2900 
2901 		/* Don't send frame to the channel it came from */
2902 		if (bt_cb(skb)->l2cap.chan == chan)
2903 			continue;
2904 
2905 		nskb = skb_clone(skb, GFP_KERNEL);
2906 		if (!nskb)
2907 			continue;
2908 		if (chan->ops->recv(chan, nskb))
2909 			kfree_skb(nskb);
2910 	}
2911 
2912 	mutex_unlock(&conn->chan_lock);
2913 }
2914 
2915 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + @dlen bytes
 * of @data, addressed to the (LE or BR/EDR) signalling channel.  Data
 * exceeding the HCI MTU is chained as continuation fragments.  Returns
 * the skb, or NULL on allocation failure or if even the headers don't
 * fit in conn->mtu.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Headers must fit in the first fragment */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any fragments already linked in as well */
	kfree_skb(skb);
	return NULL;
}
2981 
/* Extract one configuration option from *@ptr, advancing it past the
 * option.  1/2/4-byte values are returned inline in *@val; any other
 * length returns a pointer to the raw value bytes in *@val.  Returns
 * the total size consumed (header + value).  Callers must validate
 * *@olen against the remaining buffer length, since opt->len comes
 * from the peer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Caller interprets *val as a pointer for other sizes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3015 
3016 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3017 {
3018 	struct l2cap_conf_opt *opt = *ptr;
3019 
3020 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3021 
3022 	if (size < L2CAP_CONF_OPT_SIZE + len)
3023 		return;
3024 
3025 	opt->type = type;
3026 	opt->len  = len;
3027 
3028 	switch (len) {
3029 	case 1:
3030 		*((u8 *) opt->val)  = val;
3031 		break;
3032 
3033 	case 2:
3034 		put_unaligned_le16(val, opt->val);
3035 		break;
3036 
3037 	case 4:
3038 		put_unaligned_le32(val, opt->val);
3039 		break;
3040 
3041 	default:
3042 		memcpy(opt->val, (void *) val, len);
3043 		break;
3044 	}
3045 
3046 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3047 }
3048 
3049 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3050 {
3051 	struct l2cap_conf_efs efs;
3052 
3053 	switch (chan->mode) {
3054 	case L2CAP_MODE_ERTM:
3055 		efs.id		= chan->local_id;
3056 		efs.stype	= chan->local_stype;
3057 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3058 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3059 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3060 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3061 		break;
3062 
3063 	case L2CAP_MODE_STREAMING:
3064 		efs.id		= 1;
3065 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3066 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3067 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3068 		efs.acc_lat	= 0;
3069 		efs.flush_to	= 0;
3070 		break;
3071 
3072 	default:
3073 		return;
3074 	}
3075 
3076 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3077 			   (unsigned long) &efs, size);
3078 }
3079 
/* Ack timer work: if any received frames have not yet been
 * acknowledged, send an RR (or RNR) now.  The l2cap_chan_put() drops
 * the reference presumably taken when the ack timer was scheduled —
 * TODO confirm against __set_ack_timer.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Distance between what we've received and what we've acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3099 
/* Reset sequence-number state and queues for a newly configured
 * channel.  ERTM mode additionally gets SREJ/retransmission state; the
 * two sequence lists are sized by the local and remote TX windows.
 * Returns 0 on success or a negative errno.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming and other modes need no further state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3135 
3136 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3137 {
3138 	switch (mode) {
3139 	case L2CAP_MODE_STREAMING:
3140 	case L2CAP_MODE_ERTM:
3141 		if (l2cap_mode_supported(mode, remote_feat_mask))
3142 			return mode;
3143 		fallthrough;
3144 	default:
3145 		return L2CAP_MODE_BASIC;
3146 	}
3147 }
3148 
3149 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3150 {
3151 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3152 }
3153 
3154 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3155 {
3156 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3157 }
3158 
/* Fill the RFC option's retransmission and monitor timeouts with the
 * spec default values (little-endian on the wire).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3165 
3166 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3167 {
3168 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3169 	    __l2cap_ews_supported(chan->conn)) {
3170 		/* use extended control field */
3171 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3172 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3173 	} else {
3174 		chan->tx_win = min_t(u16, chan->tx_win,
3175 				     L2CAP_DEFAULT_TX_WINDOW);
3176 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3177 	}
3178 	chan->ack_win = chan->tx_win;
3179 }
3180 
3181 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3182 {
3183 	struct hci_conn *conn = chan->conn->hcon;
3184 
3185 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3186 
3187 	/* The 2-DH1 packet has between 2 and 56 information bytes
3188 	 * (including the 2-byte payload header)
3189 	 */
3190 	if (!(conn->pkt_type & HCI_2DH1))
3191 		chan->imtu = 54;
3192 
3193 	/* The 3-DH1 packet has between 2 and 85 information bytes
3194 	 * (including the 2-byte payload header)
3195 	 */
3196 	if (!(conn->pkt_type & HCI_3DH1))
3197 		chan->imtu = 83;
3198 
3199 	/* The 2-DH3 packet has between 2 and 369 information bytes
3200 	 * (including the 2-byte payload header)
3201 	 */
3202 	if (!(conn->pkt_type & HCI_2DH3))
3203 		chan->imtu = 367;
3204 
3205 	/* The 3-DH3 packet has between 2 and 554 information bytes
3206 	 * (including the 2-byte payload header)
3207 	 */
3208 	if (!(conn->pkt_type & HCI_3DH3))
3209 		chan->imtu = 552;
3210 
3211 	/* The 2-DH5 packet has between 2 and 681 information bytes
3212 	 * (including the 2-byte payload header)
3213 	 */
3214 	if (!(conn->pkt_type & HCI_2DH5))
3215 		chan->imtu = 679;
3216 
3217 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3218 	 * (including the 2-byte payload header)
3219 	 */
3220 	if (!(conn->pkt_type & HCI_3DH5))
3221 		chan->imtu = 1021;
3222 }
3223 
/* Build a Configure Request payload for @chan into @data (at most
 * @data_size bytes), emitting MTU, RFC, and optionally EFS/EWS/FCS
 * options according to the selected mode.  Returns the number of bytes
 * written.  On the first request (no prior config exchange) the mode
 * may first be downgraded to what the peer supports.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back if the peer lacks ERTM/streaming support */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Peer knows neither ERTM nor streaming: RFC is implied */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS bounded by HCI MTU minus worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option's window field is limited to the
		 * standard range; larger windows go in the EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* MPS bounded by HCI MTU minus worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3349 
/* Parse the peer's accumulated Configure Request (chan->conf_req,
 * chan->conf_len) and build our Configure Response into @data, which
 * holds at most @data_size bytes.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the requested configuration is fundamentally incompatible and the
 * channel must be disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option from the request.  Options
	 * with an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but otherwise ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* Remember that the remote opted out of FCS */
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not supported on this
			 * path; refuse the connection.
			 */
			return -ECONNREFUSED;

		default:
			/* Hints may be skipped; unknown non-hint options
			 * are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode negotiation only happens on the first request/response
	 * exchange.
	 */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Flexible device: pick the best common mode */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: our mode is mandatory */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Propose our mode instead; give up after one retry */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must match unless either side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS so an ERTM PDU always fits
			 * in the link MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Record the remote flow spec and echo it */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3569 
/* Parse the peer's Configure Response (@rsp, @len bytes) and build a
 * follow-up Configure Request into @data (at most @size bytes).
 *
 * *@result may be updated (e.g. to L2CAP_CONF_UNACCEPT on a too-small
 * MTU).  Returns the number of request bytes written, or -ECONNREFUSED
 * when the response is incompatible with our channel settings.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk every option in the response; malformed lengths are
	 * skipped.  Accepted values are echoed back in the new request.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Too small: reject but advertise the
				 * minimum we can live with.
				 */
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices cannot change mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be renegotiated away */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window caps
			 * our ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3687 
3688 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3689 				u16 result, u16 flags)
3690 {
3691 	struct l2cap_conf_rsp *rsp = data;
3692 	void *ptr = rsp->data;
3693 
3694 	BT_DBG("chan %p", chan);
3695 
3696 	rsp->scid   = cpu_to_le16(chan->dcid);
3697 	rsp->result = cpu_to_le16(result);
3698 	rsp->flags  = cpu_to_le16(flags);
3699 
3700 	return ptr - data;
3701 }
3702 
3703 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3704 {
3705 	struct l2cap_le_conn_rsp rsp;
3706 	struct l2cap_conn *conn = chan->conn;
3707 
3708 	BT_DBG("chan %p", chan);
3709 
3710 	rsp.dcid    = cpu_to_le16(chan->scid);
3711 	rsp.mtu     = cpu_to_le16(chan->imtu);
3712 	rsp.mps     = cpu_to_le16(chan->mps);
3713 	rsp.credits = cpu_to_le16(chan->rx_credits);
3714 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3715 
3716 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3717 		       &rsp);
3718 }
3719 
/* Per-channel callback used to check whether all channels of an ECRED
 * request are ready: *@data counts channels still pending accept, or
 * is set to -ECONNREFUSED if any channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a refusal was recorded, or skip channels that we
	 * initiated ourselves.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3740 
/* Scratch state used to aggregate one Enhanced Credit Based Connection
 * Response covering every channel created by a single request.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		/* Storage for the CID list appended after the header */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of CIDs filled into the response */
};
3748 
3749 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3750 {
3751 	struct l2cap_ecred_rsp_data *rsp = data;
3752 
3753 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3754 		return;
3755 
3756 	/* Reset ident so only one response is sent */
3757 	chan->ident = 0;
3758 
3759 	/* Include all channels pending with the same ident */
3760 	if (!rsp->pdu.rsp.result)
3761 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3762 	else
3763 		l2cap_chan_del(chan, ECONNRESET);
3764 }
3765 
/* Send the deferred Enhanced Credit Based Connection Response for all
 * channels that were created by the request identified by chan->ident.
 * The response is only sent once every sibling channel has either been
 * accepted or refused; if any was refused the whole request is answered
 * with L2CAP_CR_LE_AUTHORIZATION.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: at least one sibling still pending, try again later */
	if (result > 0)
		return;

	/* result < 0: a sibling was refused, reject the whole request */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3801 
3802 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3803 {
3804 	struct l2cap_conn_rsp rsp;
3805 	struct l2cap_conn *conn = chan->conn;
3806 	u8 buf[128];
3807 	u8 rsp_code;
3808 
3809 	rsp.scid   = cpu_to_le16(chan->dcid);
3810 	rsp.dcid   = cpu_to_le16(chan->scid);
3811 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3812 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3813 	rsp_code = L2CAP_CONN_RSP;
3814 
3815 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3816 
3817 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3818 
3819 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3820 		return;
3821 
3822 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3823 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3824 	chan->num_conf_req++;
3825 }
3826 
/* Extract the final RFC (and extended window size) settings from a
 * successful Configure Response and apply them to @chan.  Only relevant
 * for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Nothing to extract for basic (or LE) mode channels */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control is
		 * in use, otherwise from the RFC option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3882 
3883 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3884 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3885 				    u8 *data)
3886 {
3887 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3888 
3889 	if (cmd_len < sizeof(*rej))
3890 		return -EPROTO;
3891 
3892 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3893 		return 0;
3894 
3895 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3896 	    cmd->ident == conn->info_ident) {
3897 		cancel_delayed_work(&conn->info_timer);
3898 
3899 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3900 		conn->info_ident = 0;
3901 
3902 		l2cap_conn_start(conn);
3903 	}
3904 
3905 	return 0;
3906 }
3907 
/* Handle an incoming BR/EDR Connection Request: find a listening
 * channel for the requested PSM, create the child channel, run the
 * security checks, and send the Connection Response (@rsp_code) plus,
 * when appropriate, an Information Request and/or Configure Request.
 *
 * Returns the newly created channel, or NULL if the request was
 * rejected before a channel could be created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not done yet: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange if not done yet */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4049 
4050 static int l2cap_connect_req(struct l2cap_conn *conn,
4051 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4052 {
4053 	struct hci_dev *hdev = conn->hcon->hdev;
4054 	struct hci_conn *hcon = conn->hcon;
4055 
4056 	if (cmd_len < sizeof(struct l2cap_conn_req))
4057 		return -EPROTO;
4058 
4059 	hci_dev_lock(hdev);
4060 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4061 		mgmt_device_connected(hdev, hcon, NULL, 0);
4062 	hci_dev_unlock(hdev);
4063 
4064 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4065 	return 0;
4066 }
4067 
/* Handle a Connection Response for a channel we initiated.  On success
 * move the channel to BT_CONFIG and send our Configure Request; on a
 * pending result just record it; any other result tears the channel
 * down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a valid dynamic CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Match by our source CID when present, otherwise by the
	 * signaling identifier of the original request.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference; the channel may be concurrently dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a destination CID that is already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result means the connection was refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4156 
4157 static inline void set_default_fcs(struct l2cap_chan *chan)
4158 {
4159 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4160 	 * sides request it.
4161 	 */
4162 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4163 		chan->fcs = L2CAP_FCS_NONE;
4164 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4165 		chan->fcs = L2CAP_FCS_CRC16;
4166 }
4167 
4168 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4169 				    u8 ident, u16 flags)
4170 {
4171 	struct l2cap_conn *conn = chan->conn;
4172 
4173 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4174 	       flags);
4175 
4176 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4177 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4178 
4179 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4180 		       l2cap_build_conf_rsp(chan, data,
4181 					    L2CAP_CONF_SUCCESS, flags), data);
4182 }
4183 
4184 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4185 				   u16 scid, u16 dcid)
4186 {
4187 	struct l2cap_cmd_rej_cid rej;
4188 
4189 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4190 	rej.scid = __cpu_to_le16(scid);
4191 	rej.dcid = __cpu_to_le16(dcid);
4192 
4193 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4194 }
4195 
/* Handle an incoming Configure Request.  Option data may be split over
 * several requests (continuation flag); fragments are accumulated in
 * chan->conf_req until complete, then parsed and answered.  Finishes
 * channel setup once both directions are configured.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Takes a reference and locks the channel on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration only makes sense in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Output side done but input pending: send our Configure
	 * Request if we haven't yet.
	 */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4304 
/* Handle an incoming Configure Response.  Depending on the result this
 * applies the negotiated settings, retries with an adjusted Configure
 * Request (unknown/unacceptable options), or disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Takes a reference and locks the channel on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the final RFC/EWS settings */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, bounded by the maximum
		 * number of configuration rounds.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Configuration failed permanently */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4411 
/* Handle an incoming L2CAP Disconnection Request (BR/EDR).
 *
 * Looks up the local channel addressed by the request's DCID, echoes a
 * Disconnection Response back to the peer and tears the channel down.
 * Returns 0 on success (an unknown CID is answered with a Command Reject
 * rather than an error) or -EPROTO on a malformed request.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID names our local SCID; on success the channel is
	 * returned locked and with a reference held (released below).
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Respond before tearing the channel down */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Drop the channel lock before taking conn->chan_lock to respect the
	 * conn-lock -> chan-lock ordering, then re-take the channel lock for
	 * l2cap_chan_del().
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4454 
/* Handle an incoming L2CAP Disconnection Response (BR/EDR).
 *
 * Completes a disconnect we initiated earlier: the addressed channel is
 * expected to be in BT_DISCONN; anything else (or an unknown SCID) is
 * silently ignored.  Returns 0, or -EPROTO on a malformed response.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Only act on a response to a disconnect we actually sent */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Re-take the channel lock under conn->chan_lock so the
	 * conn-lock -> chan-lock ordering holds for l2cap_chan_del().
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4495 
4496 static inline int l2cap_information_req(struct l2cap_conn *conn,
4497 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4498 					u8 *data)
4499 {
4500 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4501 	u16 type;
4502 
4503 	if (cmd_len != sizeof(*req))
4504 		return -EPROTO;
4505 
4506 	type = __le16_to_cpu(req->type);
4507 
4508 	BT_DBG("type 0x%4.4x", type);
4509 
4510 	if (type == L2CAP_IT_FEAT_MASK) {
4511 		u8 buf[8];
4512 		u32 feat_mask = l2cap_feat_mask;
4513 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4514 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4515 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4516 		if (!disable_ertm)
4517 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4518 				| L2CAP_FEAT_FCS;
4519 
4520 		put_unaligned_le32(feat_mask, rsp->data);
4521 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4522 			       buf);
4523 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4524 		u8 buf[12];
4525 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4526 
4527 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4528 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4529 		rsp->data[0] = conn->local_fixed_chan;
4530 		memset(rsp->data + 1, 0, 7);
4531 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4532 			       buf);
4533 	} else {
4534 		struct l2cap_info_rsp rsp;
4535 		rsp.type   = cpu_to_le16(type);
4536 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4537 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4538 			       &rsp);
4539 	}
4540 
4541 	return 0;
4542 }
4543 
/* Handle an L2CAP Information Response (BR/EDR).
 *
 * Drives the two-step feature discovery done at connection setup: first
 * the feature mask, then (if the peer supports it) the fixed-channel
 * bitmap.  When discovery finishes, pending channels are started via
 * l2cap_conn_start().  Returns 0, or -EPROTO on a malformed response.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure, give up on discovery and start channels anyway */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: issue the follow-up
			 * query before declaring discovery done.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Second and final discovery step */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4606 
4607 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4608 					      struct l2cap_cmd_hdr *cmd,
4609 					      u16 cmd_len, u8 *data)
4610 {
4611 	struct hci_conn *hcon = conn->hcon;
4612 	struct l2cap_conn_param_update_req *req;
4613 	struct l2cap_conn_param_update_rsp rsp;
4614 	u16 min, max, latency, to_multiplier;
4615 	int err;
4616 
4617 	if (hcon->role != HCI_ROLE_MASTER)
4618 		return -EINVAL;
4619 
4620 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4621 		return -EPROTO;
4622 
4623 	req = (struct l2cap_conn_param_update_req *) data;
4624 	min		= __le16_to_cpu(req->min);
4625 	max		= __le16_to_cpu(req->max);
4626 	latency		= __le16_to_cpu(req->latency);
4627 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4628 
4629 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4630 	       min, max, latency, to_multiplier);
4631 
4632 	memset(&rsp, 0, sizeof(rsp));
4633 
4634 	if (max > hcon->le_conn_max_interval) {
4635 		BT_DBG("requested connection interval exceeds current bounds.");
4636 		err = -EINVAL;
4637 	} else {
4638 		err = hci_check_conn_params(min, max, latency, to_multiplier);
4639 	}
4640 
4641 	if (err)
4642 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4643 	else
4644 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4645 
4646 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4647 		       sizeof(rsp), &rsp);
4648 
4649 	if (!err) {
4650 		u8 store_hint;
4651 
4652 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4653 						to_multiplier);
4654 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4655 				    store_hint, min, max, latency,
4656 				    to_multiplier);
4657 
4658 	}
4659 
4660 	return 0;
4661 }
4662 
/* Handle an LE Credit Based Connection Response.
 *
 * Matches the response to the pending channel via the command ident and
 * either brings the channel up, retries with elevated security, or tears
 * it down on refusal.  Returns 0 on success, -EPROTO on a malformed
 * response, -EBADSLT when no matching channel (or a duplicate DCID) is
 * found.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the peer must supply sane MTU/MPS and a DCID in the
	 * LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is identified by the request's ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one notch and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4749 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Only request handlers propagate their return value; response handlers
 * deliberately have their errors ignored so a bad response never triggers
 * a Command Reject from us.  Unknown opcodes return -EINVAL, which the
 * caller answers with a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4808 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the request (SPSM range, MTU/MPS minimums, dynamic SCID
 * range, duplicate SCID), checks for a listening socket and sufficient
 * security, and either creates the new channel or reports the failure
 * in the response's result field.  When the new channel uses deferred
 * setup, no response is sent here; chan->ident is kept so the response
 * can be sent later.  Returns 0, or -EPROTO on a malformed request.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE MTU/MPS allowed by the spec */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the connection and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our side of the response: local CID and initial credits */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the ident so a deferred response can be sent later */
	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4947 
4948 static inline int l2cap_le_credits(struct l2cap_conn *conn,
4949 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4950 				   u8 *data)
4951 {
4952 	struct l2cap_le_credits *pkt;
4953 	struct l2cap_chan *chan;
4954 	u16 cid, credits, max_credits;
4955 
4956 	if (cmd_len != sizeof(*pkt))
4957 		return -EPROTO;
4958 
4959 	pkt = (struct l2cap_le_credits *) data;
4960 	cid	= __le16_to_cpu(pkt->cid);
4961 	credits	= __le16_to_cpu(pkt->credits);
4962 
4963 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
4964 
4965 	chan = l2cap_get_chan_by_dcid(conn, cid);
4966 	if (!chan)
4967 		return -EBADSLT;
4968 
4969 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
4970 	if (credits > max_credits) {
4971 		BT_ERR("LE credits overflow");
4972 		l2cap_send_disconn_req(chan, ECONNRESET);
4973 
4974 		/* Return 0 so that we don't trigger an unnecessary
4975 		 * command reject packet.
4976 		 */
4977 		goto unlock;
4978 	}
4979 
4980 	chan->tx_credits += credits;
4981 
4982 	/* Resume sending */
4983 	l2cap_le_flowctl_send(chan);
4984 
4985 	if (chan->tx_credits)
4986 		chan->ops->resume(chan);
4987 
4988 unlock:
4989 	l2cap_chan_unlock(chan);
4990 	l2cap_chan_put(chan);
4991 
4992 	return 0;
4993 }
4994 
/* Handle an Enhanced Credit Based Connection Request (up to
 * L2CAP_ECRED_MAX_CID channels in one command).
 *
 * Each requested SCID is validated independently; a per-CID failure sets
 * the shared result code and leaves that response DCID as 0x0000 while
 * the remaining CIDs are still processed.  If any channel defers setup,
 * no response is sent here (it is sent later using chan->ident).
 * Returns 0, or -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Fixed header plus a whole number of 16-bit SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* DCID 0x0000 in the response marks a refused channel */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		/* Keep the ident so a deferred response can be sent later */
		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
5150 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every pending EXT_FLOWCTL channel matched by the command ident,
 * consuming one DCID from the response per channel.  Channels without a
 * corresponding DCID, or whose DCID collides with an existing channel,
 * are torn down; otherwise the result code decides between bring-up,
 * security retry and refusal.  Returns 0, or -EPROTO on a malformed
 * response.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* Remaining bytes are the DCID list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only act on channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one notch and retry */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
5264 
5265 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5266 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5267 					 u8 *data)
5268 {
5269 	struct l2cap_ecred_reconf_req *req = (void *) data;
5270 	struct l2cap_ecred_reconf_rsp rsp;
5271 	u16 mtu, mps, result;
5272 	struct l2cap_chan *chan;
5273 	int i, num_scid;
5274 
5275 	if (!enable_ecred)
5276 		return -EINVAL;
5277 
5278 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5279 		result = L2CAP_CR_LE_INVALID_PARAMS;
5280 		goto respond;
5281 	}
5282 
5283 	mtu = __le16_to_cpu(req->mtu);
5284 	mps = __le16_to_cpu(req->mps);
5285 
5286 	BT_DBG("mtu %u mps %u", mtu, mps);
5287 
5288 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5289 		result = L2CAP_RECONF_INVALID_MTU;
5290 		goto respond;
5291 	}
5292 
5293 	if (mps < L2CAP_ECRED_MIN_MPS) {
5294 		result = L2CAP_RECONF_INVALID_MPS;
5295 		goto respond;
5296 	}
5297 
5298 	cmd_len -= sizeof(*req);
5299 	num_scid = cmd_len / sizeof(u16);
5300 	result = L2CAP_RECONF_SUCCESS;
5301 
5302 	for (i = 0; i < num_scid; i++) {
5303 		u16 scid;
5304 
5305 		scid = __le16_to_cpu(req->scid[i]);
5306 		if (!scid)
5307 			return -EPROTO;
5308 
5309 		chan = __l2cap_get_chan_by_dcid(conn, scid);
5310 		if (!chan)
5311 			continue;
5312 
5313 		/* If the MTU value is decreased for any of the included
5314 		 * channels, then the receiver shall disconnect all
5315 		 * included channels.
5316 		 */
5317 		if (chan->omtu > mtu) {
5318 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
5319 			       chan->omtu, mtu);
5320 			result = L2CAP_RECONF_INVALID_MTU;
5321 		}
5322 
5323 		chan->omtu = mtu;
5324 		chan->remote_mps = mps;
5325 	}
5326 
5327 respond:
5328 	rsp.result = cpu_to_le16(result);
5329 
5330 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5331 		       &rsp);
5332 
5333 	return 0;
5334 }
5335 
5336 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5337 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5338 					 u8 *data)
5339 {
5340 	struct l2cap_chan *chan, *tmp;
5341 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5342 	u16 result;
5343 
5344 	if (cmd_len < sizeof(*rsp))
5345 		return -EPROTO;
5346 
5347 	result = __le16_to_cpu(rsp->result);
5348 
5349 	BT_DBG("result 0x%4.4x", rsp->result);
5350 
5351 	if (!result)
5352 		return 0;
5353 
5354 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5355 		if (chan->ident != cmd->ident)
5356 			continue;
5357 
5358 		l2cap_chan_del(chan, ECONNRESET);
5359 	}
5360 
5361 	return 0;
5362 }
5363 
5364 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5365 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5366 				       u8 *data)
5367 {
5368 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5369 	struct l2cap_chan *chan;
5370 
5371 	if (cmd_len < sizeof(*rej))
5372 		return -EPROTO;
5373 
5374 	mutex_lock(&conn->chan_lock);
5375 
5376 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5377 	if (!chan)
5378 		goto done;
5379 
5380 	chan = l2cap_chan_hold_unless_zero(chan);
5381 	if (!chan)
5382 		goto done;
5383 
5384 	l2cap_chan_lock(chan);
5385 	l2cap_chan_del(chan, ECONNREFUSED);
5386 	l2cap_chan_unlock(chan);
5387 	l2cap_chan_put(chan);
5388 
5389 done:
5390 	mutex_unlock(&conn->chan_lock);
5391 	return 0;
5392 }
5393 
/* Dispatch one LE signaling command to its handler.
 *
 * As on BR/EDR, only request handlers propagate errors; response
 * handlers have their return values deliberately ignored so a bad
 * response never triggers a Command Reject from us.  Unknown opcodes
 * return -EINVAL, which the caller answers with a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5456 
5457 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5458 					struct sk_buff *skb)
5459 {
5460 	struct hci_conn *hcon = conn->hcon;
5461 	struct l2cap_cmd_hdr *cmd;
5462 	u16 len;
5463 	int err;
5464 
5465 	if (hcon->type != LE_LINK)
5466 		goto drop;
5467 
5468 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5469 		goto drop;
5470 
5471 	cmd = (void *) skb->data;
5472 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5473 
5474 	len = le16_to_cpu(cmd->len);
5475 
5476 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5477 
5478 	if (len != skb->len || !cmd->ident) {
5479 		BT_DBG("corrupted command");
5480 		goto drop;
5481 	}
5482 
5483 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5484 	if (err) {
5485 		struct l2cap_cmd_rej_unk rej;
5486 
5487 		BT_ERR("Wrong link type (%d)", err);
5488 
5489 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5490 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5491 			       sizeof(rej), &rej);
5492 	}
5493 
5494 drop:
5495 	kfree_skb(skb);
5496 }
5497 
5498 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5499 {
5500 	struct l2cap_cmd_rej_unk rej;
5501 
5502 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5503 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5504 }
5505 
/* Process a frame received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may carry several commands back to back; each
 * is parsed and dispatched in turn.  Corrupted commands and handler
 * errors are answered with Command Rejects, and trailing garbage after
 * the last full header gets a reject with ident 0.  Consumes the skb in
 * all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Feed raw-socket listeners before dispatching */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Iterate over every complete command header in the PDU */
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length overruns the PDU, or reserved ident 0:
		 * reject and skip as much of the bogus payload as exists.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		/* Advance past this command's payload */
		skb_pull(skb, len);
	}

	/* Leftover bytes smaller than a header are a protocol violation */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5553 
/* Verify the CRC16 FCS trailer of a received ERTM/streaming PDU.
 *
 * Returns 0 when the channel uses no FCS or the checksum matches, and
 * -EBADMSG on a mismatch.  On the FCS path the 2-byte trailer is
 * trimmed from @skb as a side effect, so on success only the payload
 * remains.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the L2CAP header, which sits immediately
	 * before skb->data; extended control fields enlarge it.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS first; the bytes are still present in the
		 * buffer, so they can be read at the new skb->len offset.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5574 
/* Make sure a frame carrying the F-bit (final) and acking
 * chan->buffer_seq goes out: an RNR when the local receiver is busy,
 * otherwise pending I-frames, or a plain RR as a last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* CONN_SEND_FBIT is cleared by whichever frame carries the F-bit */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just became un-busy; restart the retransmit timer if we
	 * still have frames in flight.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5608 
/* Append @new_frag to the frag_list of the partially reassembled SDU
 * head @skb, advancing *last_frag and keeping the aggregate
 * len/data_len/truesize accounting consistent.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first fragment *last_frag is the head skb
	 * itself (callers initialize sdu_last_frag to the head), so this
	 * also writes the head's ->next — presumably benign because the
	 * head is not on a queue at this point; confirm with callers.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5627 
/* Reassemble segmented (SAR) ERTM/streaming frames into complete SDUs.
 *
 * Takes ownership of @skb.  Unsegmented SDUs are delivered to the
 * channel immediately; START/CONTINUE/END segments are accumulated on
 * chan->sdu (via a frag list) and delivered when END completes the SDU.
 * Returns 0 on success — including "segment stored, SDU not complete
 * yet" — or a negative errno on a protocol/delivery error, in which
 * case the skb and any partial SDU are freed here.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Default: any SAR combination not explicitly accepted below is a
	 * protocol error.
	 */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented SDU while reassembly is in progress is
		 * invalid.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* START while a previous SDU is still being reassembled is
		 * invalid.
		 */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First two bytes of a START segment carry the total SDU
		 * length.
		 */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START segment must be smaller than the whole SDU,
		 * otherwise it should have been sent unsegmented.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a START is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching (or exceeding) the announced length before END is
		 * a protocol error; the partial SDU is discarded below.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END without a START is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The reassembled length must match the announced length
		 * exactly.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb may already be NULL here (ownership transferred);
		 * kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5712 
/* Re-segment queued transmit data after the ACL MTU changes (called
 * once a channel move completes — see callers).  Not implemented yet;
 * currently a no-op that reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5718 
5719 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5720 {
5721 	u8 event;
5722 
5723 	if (chan->mode != L2CAP_MODE_ERTM)
5724 		return;
5725 
5726 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5727 	l2cap_tx(chan, NULL, NULL, event);
5728 }
5729 
/* Drain buffered out-of-order I-frames from the SREJ queue.
 *
 * Starting at chan->buffer_seq, consecutive frames are unlinked from
 * chan->srej_q and passed to l2cap_reassemble_sdu() until a sequence
 * gap, a local-busy condition, or a reassembly error stops delivery.
 * Once the queue is fully drained the channel returns to the RECV
 * state and an ack is sent.  Returns the last reassembly error, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-sequence frame hasn't arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5763 
/* Handle a received SREJ S-frame: the peer is selectively requesting
 * retransmission of the frame with sequence number control->reqseq.
 *
 * Disconnects on protocol errors: a reqseq for a frame we never sent,
 * or a frame whose retry count already reached chan->max_tx.  The
 * CONN_SREJ_ACT / srej_save_reqseq bookkeeping suppresses a duplicate
 * retransmission when a later F-bit frame answers our own poll for the
 * same sequence number.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* next_tx_seq has never been transmitted, so it cannot be SREJ'd */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: retransmit the requested frame, resume sending, and
		 * remember the reqseq so the matching F-bit reply is not
		 * treated as a second retransmission request.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only retransmit if this F-bit isn't the answer to
			 * an SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5821 
/* Handle a received REJ S-frame: the peer rejects everything from
 * control->reqseq onward, asking for a go-back-N retransmission.
 *
 * Disconnects on protocol errors (reqseq for a frame never sent, or
 * retry limit reached).  CONN_REJ_ACT prevents retransmitting twice
 * when an F-bit frame later acknowledges the same REJ.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* next_tx_seq has never been transmitted, so it cannot be REJ'd */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmission if this F-bit already answered an
		 * earlier REJ (CONN_REJ_ACT was set when we acted on it).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5858 
/* Classify a received I-frame's @txseq relative to the receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* codes that drive the rx state
 * machines: EXPECTED, DUPLICATE, UNEXPECTED (sequence gap), INVALID,
 * INVALID_IGNORE, plus the SREJ-specific variants (EXPECTED_SREJ,
 * DUPLICATE_SREJ, UNEXPECTED_SREJ) while in the SREJ_SENT state.
 * All comparisons are done as modular offsets from last_acked_seq.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5944 
/* ERTM rx state machine handler for the normal RECV state.
 *
 * @control is the unpacked control field of the received frame (it may
 * live inside @skb's control block — see the UAF note below) and
 * @event is one of L2CAP_EV_RECV_*.  Ownership of @skb passes to this
 * function: it is either consumed (delivered or queued, tracked via
 * skb_in_use) or freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F=1: if no REJ was outstanding, retransmit all
			 * unacked frames (with F cleared on the copies).
			 */
			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 without a pending REJ means retransmit all
			 * unacked frames (with F cleared).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote un-busy: restart the retransmit timer if
			 * frames are still in flight, then resume sending.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6096 
/* ERTM rx state machine handler for the SREJ_SENT state (one or more
 * SREJs are outstanding and out-of-order frames are being buffered in
 * chan->srej_q).
 *
 * Ownership of @skb passes to this function: it is queued (skb_in_use)
 * or freed before returning.  Frames are held here until
 * l2cap_rx_queued_iframes() can deliver them in sequence.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list arrived;
			 * drop it from the list, queue the frame, and try to
			 * deliver everything that is now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 without a pending REJ means retransmit all
			 * unacked frames (with F cleared).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the most recent
			 * SREJ'd frame with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6239 
6240 static int l2cap_finish_move(struct l2cap_chan *chan)
6241 {
6242 	BT_DBG("chan %p", chan);
6243 
6244 	chan->rx_state = L2CAP_RX_STATE_RECV;
6245 	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6246 
6247 	return l2cap_resegment(chan);
6248 }
6249 
/* ERTM rx state: waiting for a poll (P=1) frame to complete a channel
 * move.  Non-poll frames are rejected with -EPROTO.  On a valid poll
 * the tx side is rewound to the peer's reqseq, the move is finalized,
 * and an F-bit response is sent; any S-frame event is then replayed
 * through the normal RECV handler (I-frames are not accepted here).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6287 
/* ERTM rx state: waiting for a final (F=1) frame to complete a channel
 * move.  Non-final frames are rejected with -EPROTO.  On a valid final
 * frame the tx side is rewound to the peer's reqseq, the ACL MTU is
 * adopted, pending data is resegmented, and the frame itself is then
 * processed by the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6321 
6322 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6323 {
6324 	/* Make sure reqseq is for a packet that has been sent but not acked */
6325 	u16 unacked;
6326 
6327 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6328 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6329 }
6330 
6331 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6332 		    struct sk_buff *skb, u8 event)
6333 {
6334 	int err = 0;
6335 
6336 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6337 	       control, skb, event, chan->rx_state);
6338 
6339 	if (__valid_reqseq(chan, control->reqseq)) {
6340 		switch (chan->rx_state) {
6341 		case L2CAP_RX_STATE_RECV:
6342 			err = l2cap_rx_state_recv(chan, control, skb, event);
6343 			break;
6344 		case L2CAP_RX_STATE_SREJ_SENT:
6345 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6346 						       event);
6347 			break;
6348 		case L2CAP_RX_STATE_WAIT_P:
6349 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6350 			break;
6351 		case L2CAP_RX_STATE_WAIT_F:
6352 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6353 			break;
6354 		default:
6355 			/* shut it down */
6356 			break;
6357 		}
6358 	} else {
6359 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6360 		       control->reqseq, chan->next_tx_seq,
6361 		       chan->expected_ack_seq);
6362 		l2cap_send_disconn_req(chan, ECONNRESET);
6363 	}
6364 
6365 	return err;
6366 }
6367 
/* Receive one I-frame in streaming mode: deliver it if it carries the
 * expected txseq, otherwise discard it along with any partial SDU
 * (streaming mode never retransmits).  Always returns 0; @skb is
 * consumed in all paths.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: drop it and abandon any SDU that
		 * was being reassembled.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Advance past this frame regardless of whether it was delivered */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6419 
/* Entry point for ERTM/streaming PDUs on a connected channel.
 *
 * Unpacks the control field, checks the FCS, validates payload length
 * and the F/P bit combination, then feeds the frame into l2cap_rx()
 * (ERTM) or l2cap_stream_rx() (streaming).  Always returns 0; invalid
 * frames are dropped and severe errors trigger a disconnect.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* len counts only the payload: subtract the SDU length field on
	 * START segments and the FCS trailer when present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* (0..3) to the matching rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6512 
6513 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6514 {
6515 	struct l2cap_conn *conn = chan->conn;
6516 	struct l2cap_le_credits pkt;
6517 	u16 return_credits;
6518 
6519 	return_credits = (chan->imtu / chan->mps) + 1;
6520 
6521 	if (chan->rx_credits >= return_credits)
6522 		return;
6523 
6524 	return_credits -= chan->rx_credits;
6525 
6526 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6527 
6528 	chan->rx_credits += return_credits;
6529 
6530 	pkt.cid     = cpu_to_le16(chan->scid);
6531 	pkt.credits = cpu_to_le16(return_credits);
6532 
6533 	chan->ident = l2cap_get_ident(conn);
6534 
6535 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6536 }
6537 
6538 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6539 {
6540 	int err;
6541 
6542 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6543 
6544 	/* Wait recv to confirm reception before updating the credits */
6545 	err = chan->ops->recv(chan, skb);
6546 
6547 	/* Update credits whenever an SDU is received */
6548 	l2cap_chan_le_send_credits(chan);
6549 
6550 	return err;
6551 }
6552 
/* Receive one PDU on an LE / enhanced credit based flow control
 * channel: account a credit, then either start a new SDU (first PDU
 * carries the SDU length) or append a fragment to the in-progress one,
 * delivering it when complete.
 *
 * Returns a negative errno only for errors the caller must act on
 * (no credits / oversized PDU, where the caller still owns @skb);
 * all other paths consume @skb internally and return 0.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* Caller frees the skb on a negative return */
	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* NOTE(review): skb->len is assumed to be at least
		 * L2CAP_SDULEN_SIZE here — confirm callers guarantee a
		 * minimum PDU length before this read.
		 */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in one PDU: deliver it immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Begin reassembly; chan->sdu now owns the skb */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* skb may be NULL here (already owned by chan->sdu) */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6656 
/* Deliver an L2CAP data frame to the channel identified by @cid.
 *
 * Looks up the channel by source CID — on success the lookup takes a
 * reference and locks the channel — then dispatches the skb according
 * to the channel mode.  The skb is always consumed: it is either handed
 * to a mode-specific receive path or freed at the "drop" label.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success this returns the channel locked and with a
	 * reference held; both are released at "done" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes: SDU reassembly handled internally;
		 * the callee owns the skb on success.
		 */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming control-field processing consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6722 
/* Handle a connectionless (G-frame) packet received on the
 * connectionless CID.
 *
 * Only valid on BR/EDR (ACL) links.  The frame is delivered to a
 * matching global listener for @psm, stamping the remote address and
 * PSM into the skb control block so recvmsg() can report msg_name.
 * The skb is consumed on every path; the channel reference taken by
 * the lookup is dropped before returning.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Returns the channel with a reference held on success. */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6759 
/* Dispatch one complete L2CAP frame (header included) to its handler.
 *
 * If the underlying HCI link is not fully connected yet, the frame is
 * queued on conn->pending_rx and replayed later by process_pending_rx().
 * Ownership of the skb always passes to the chosen handler, or the skb
 * is freed here on a length mismatch or for a rejected LE peer.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Header fields are read after the pull below; lh stays valid
	 * because skb_pull() only advances skb->data within the buffer.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The advertised length must match the remaining payload exactly. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM before payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6814 
6815 static void process_pending_rx(struct work_struct *work)
6816 {
6817 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6818 					       pending_rx_work);
6819 	struct sk_buff *skb;
6820 
6821 	BT_DBG("");
6822 
6823 	while ((skb = skb_dequeue(&conn->pending_rx)))
6824 		l2cap_recv_frame(conn, skb);
6825 }
6826 
/* Create (or return the existing) L2CAP connection object for @hcon.
 *
 * Allocates the struct l2cap_conn, attaches it to the HCI connection,
 * selects the outgoing MTU from the controller limits and initializes
 * locks, lists, work items and the pending-rx queue.  Returns NULL on
 * allocation failure (the hci_chan created here is rolled back).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Idempotent: reuse an already attached connection object. */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Prefer the LE-specific MTU when the controller reports one;
	 * otherwise fall back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when it can actually be used. */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	/* Default reason reported while no disconnect cause is known yet */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6889 
6890 static bool is_valid_psm(u16 psm, u8 dst_type)
6891 {
6892 	if (!psm)
6893 		return false;
6894 
6895 	if (bdaddr_type_is_le(dst_type))
6896 		return (psm <= 0x00ff);
6897 
6898 	/* PSM must be odd and lsb of upper byte must be 0 */
6899 	return ((psm & 0x0101) == 0x0001);
6900 }
6901 
/* Context passed to l2cap_chan_by_pid() while walking a connection's
 * channel list to count in-flight ECRED connect attempts per process.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the scan (excluded) */
	struct pid *pid;		/* peer pid to match against */
	int count;			/* matching channels found so far */
};
6907 
6908 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6909 {
6910 	struct l2cap_chan_data *d = data;
6911 	struct pid *pid;
6912 
6913 	if (chan == d->chan)
6914 		return;
6915 
6916 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6917 		return;
6918 
6919 	pid = chan->ops->get_peer_pid(chan);
6920 
6921 	/* Only count deferred channels with the same PID/PSM */
6922 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6923 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6924 		return;
6925 
6926 	d->count++;
6927 }
6928 
6929 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6930 		       bdaddr_t *dst, u8 dst_type)
6931 {
6932 	struct l2cap_conn *conn;
6933 	struct hci_conn *hcon;
6934 	struct hci_dev *hdev;
6935 	int err;
6936 
6937 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
6938 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
6939 
6940 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
6941 	if (!hdev)
6942 		return -EHOSTUNREACH;
6943 
6944 	hci_dev_lock(hdev);
6945 
6946 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6947 	    chan->chan_type != L2CAP_CHAN_RAW) {
6948 		err = -EINVAL;
6949 		goto done;
6950 	}
6951 
6952 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6953 		err = -EINVAL;
6954 		goto done;
6955 	}
6956 
6957 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
6958 		err = -EINVAL;
6959 		goto done;
6960 	}
6961 
6962 	switch (chan->mode) {
6963 	case L2CAP_MODE_BASIC:
6964 		break;
6965 	case L2CAP_MODE_LE_FLOWCTL:
6966 		break;
6967 	case L2CAP_MODE_EXT_FLOWCTL:
6968 		if (!enable_ecred) {
6969 			err = -EOPNOTSUPP;
6970 			goto done;
6971 		}
6972 		break;
6973 	case L2CAP_MODE_ERTM:
6974 	case L2CAP_MODE_STREAMING:
6975 		if (!disable_ertm)
6976 			break;
6977 		fallthrough;
6978 	default:
6979 		err = -EOPNOTSUPP;
6980 		goto done;
6981 	}
6982 
6983 	switch (chan->state) {
6984 	case BT_CONNECT:
6985 	case BT_CONNECT2:
6986 	case BT_CONFIG:
6987 		/* Already connecting */
6988 		err = 0;
6989 		goto done;
6990 
6991 	case BT_CONNECTED:
6992 		/* Already connected */
6993 		err = -EISCONN;
6994 		goto done;
6995 
6996 	case BT_OPEN:
6997 	case BT_BOUND:
6998 		/* Can connect */
6999 		break;
7000 
7001 	default:
7002 		err = -EBADFD;
7003 		goto done;
7004 	}
7005 
7006 	/* Set destination address and psm */
7007 	bacpy(&chan->dst, dst);
7008 	chan->dst_type = dst_type;
7009 
7010 	chan->psm = psm;
7011 	chan->dcid = cid;
7012 
7013 	if (bdaddr_type_is_le(dst_type)) {
7014 		/* Convert from L2CAP channel address type to HCI address type
7015 		 */
7016 		if (dst_type == BDADDR_LE_PUBLIC)
7017 			dst_type = ADDR_LE_DEV_PUBLIC;
7018 		else
7019 			dst_type = ADDR_LE_DEV_RANDOM;
7020 
7021 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7022 			hcon = hci_connect_le(hdev, dst, dst_type, false,
7023 					      chan->sec_level,
7024 					      HCI_LE_CONN_TIMEOUT,
7025 					      HCI_ROLE_SLAVE);
7026 		else
7027 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7028 						   chan->sec_level,
7029 						   HCI_LE_CONN_TIMEOUT,
7030 						   CONN_REASON_L2CAP_CHAN);
7031 
7032 	} else {
7033 		u8 auth_type = l2cap_get_auth_type(chan);
7034 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7035 				       CONN_REASON_L2CAP_CHAN);
7036 	}
7037 
7038 	if (IS_ERR(hcon)) {
7039 		err = PTR_ERR(hcon);
7040 		goto done;
7041 	}
7042 
7043 	conn = l2cap_conn_add(hcon);
7044 	if (!conn) {
7045 		hci_conn_drop(hcon);
7046 		err = -ENOMEM;
7047 		goto done;
7048 	}
7049 
7050 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7051 		struct l2cap_chan_data data;
7052 
7053 		data.chan = chan;
7054 		data.pid = chan->ops->get_peer_pid(chan);
7055 		data.count = 1;
7056 
7057 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7058 
7059 		/* Check if there isn't too many channels being connected */
7060 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7061 			hci_conn_drop(hcon);
7062 			err = -EPROTO;
7063 			goto done;
7064 		}
7065 	}
7066 
7067 	mutex_lock(&conn->chan_lock);
7068 	l2cap_chan_lock(chan);
7069 
7070 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7071 		hci_conn_drop(hcon);
7072 		err = -EBUSY;
7073 		goto chan_unlock;
7074 	}
7075 
7076 	/* Update source addr of the socket */
7077 	bacpy(&chan->src, &hcon->src);
7078 	chan->src_type = bdaddr_src_type(hcon);
7079 
7080 	__l2cap_chan_add(conn, chan);
7081 
7082 	/* l2cap_chan_add takes its own ref so we can drop this one */
7083 	hci_conn_drop(hcon);
7084 
7085 	l2cap_state_change(chan, BT_CONNECT);
7086 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7087 
7088 	/* Release chan->sport so that it can be reused by other
7089 	 * sockets (as it's only used for listening sockets).
7090 	 */
7091 	write_lock(&chan_list_lock);
7092 	chan->sport = 0;
7093 	write_unlock(&chan_list_lock);
7094 
7095 	if (hcon->state == BT_CONNECTED) {
7096 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7097 			__clear_chan_timer(chan);
7098 			if (l2cap_chan_check_security(chan, true))
7099 				l2cap_state_change(chan, BT_CONNECTED);
7100 		} else
7101 			l2cap_do_start(chan);
7102 	}
7103 
7104 	err = 0;
7105 
7106 chan_unlock:
7107 	l2cap_chan_unlock(chan);
7108 	mutex_unlock(&conn->chan_lock);
7109 done:
7110 	hci_dev_unlock(hdev);
7111 	hci_dev_put(hdev);
7112 	return err;
7113 }
7114 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7115 
7116 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7117 {
7118 	struct l2cap_conn *conn = chan->conn;
7119 	struct {
7120 		struct l2cap_ecred_reconf_req req;
7121 		__le16 scid;
7122 	} pdu;
7123 
7124 	pdu.req.mtu = cpu_to_le16(chan->imtu);
7125 	pdu.req.mps = cpu_to_le16(chan->mps);
7126 	pdu.scid    = cpu_to_le16(chan->scid);
7127 
7128 	chan->ident = l2cap_get_ident(conn);
7129 
7130 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7131 		       sizeof(pdu), &pdu);
7132 }
7133 
7134 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7135 {
7136 	if (chan->imtu > mtu)
7137 		return -EINVAL;
7138 
7139 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7140 
7141 	chan->imtu = mtu;
7142 
7143 	l2cap_ecred_reconfigure(chan);
7144 
7145 	return 0;
7146 }
7147 
7148 /* ---- L2CAP interface with lower layer (HCI) ---- */
7149 
7150 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7151 {
7152 	int exact = 0, lm1 = 0, lm2 = 0;
7153 	struct l2cap_chan *c;
7154 
7155 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7156 
7157 	/* Find listening sockets and check their link_mode */
7158 	read_lock(&chan_list_lock);
7159 	list_for_each_entry(c, &chan_list, global_l) {
7160 		if (c->state != BT_LISTEN)
7161 			continue;
7162 
7163 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7164 			lm1 |= HCI_LM_ACCEPT;
7165 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7166 				lm1 |= HCI_LM_MASTER;
7167 			exact++;
7168 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7169 			lm2 |= HCI_LM_ACCEPT;
7170 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7171 				lm2 |= HCI_LM_MASTER;
7172 		}
7173 	}
7174 	read_unlock(&chan_list_lock);
7175 
7176 	return exact ? lm1 : lm2;
7177 }
7178 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A matching channel is returned with a reference held (taken under
 * chan_list_lock via l2cap_chan_hold_unless_zero()); the caller must
 * drop it with l2cap_chan_put().  Returns NULL when the list is
 * exhausted.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head.
	 * Resuming relies on the caller still holding a reference to
	 * @c so its list linkage is stable.
	 */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept listeners bound to this adapter or to any */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7214 
/* HCI connect-complete callback.
 *
 * On failure the whole L2CAP connection is torn down.  On success an
 * L2CAP connection object is (created and) attached, fixed-channel
 * listeners are offered the new link, and the connection is marked
 * ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Spawn a per-connection child channel off the listener */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7275 
7276 int l2cap_disconn_ind(struct hci_conn *hcon)
7277 {
7278 	struct l2cap_conn *conn = hcon->l2cap_data;
7279 
7280 	BT_DBG("hcon %p", hcon);
7281 
7282 	if (!conn)
7283 		return HCI_ERROR_REMOTE_USER_TERM;
7284 	return conn->disc_reason;
7285 }
7286 
7287 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7288 {
7289 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7290 		return;
7291 
7292 	BT_DBG("hcon %p reason %d", hcon, reason);
7293 
7294 	l2cap_conn_del(hcon, bt_to_errno(reason));
7295 }
7296 
7297 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7298 {
7299 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7300 		return;
7301 
7302 	if (encrypt == 0x00) {
7303 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7304 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7305 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7306 			   chan->sec_level == BT_SECURITY_FIPS)
7307 			l2cap_chan_close(chan, ECONNREFUSED);
7308 	} else {
7309 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7310 			__clear_chan_timer(chan);
7311 	}
7312 }
7313 
/* HCI security (authentication/encryption) change callback.
 *
 * Walks every channel on the connection under conn->chan_lock, updating
 * the security level and advancing channels whose connection setup was
 * blocked waiting for this security procedure: BT_CONNECT channels send
 * their connect request, BT_CONNECT2 channels (incoming, non-LE-based
 * modes) answer the pending connect request with success, pending
 * authorization, or a security block.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the effective level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a pending connect are handled when the
		 * response arrives; skip them here.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing: proceed only with a sufficiently long
			 * encryption key, otherwise schedule a disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace must authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7400 
7401 /* Append fragment into frame respecting the maximum len of rx_skb */
7402 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7403 			   u16 len)
7404 {
7405 	if (!conn->rx_skb) {
7406 		/* Allocate skb for the complete frame (with header) */
7407 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7408 		if (!conn->rx_skb)
7409 			return -ENOMEM;
7410 		/* Init rx_len */
7411 		conn->rx_len = len;
7412 	}
7413 
7414 	/* Copy as much as the rx_skb can hold */
7415 	len = min_t(u16, len, skb->len);
7416 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7417 	skb_pull(skb, len);
7418 	conn->rx_len -= len;
7419 
7420 	return len;
7421 }
7422 
/* Complete the 2-byte L2CAP length field of a frame whose start
 * fragment was too short to contain it.
 *
 * Once the length is known, either fix up conn->rx_len if the existing
 * reassembly buffer is large enough, or reallocate conn->rx_skb with
 * the exact expected size and move the already-received bytes over.
 *
 * Returns the number of bytes consumed (>= 0) or a negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the bytes
	 * gathered so far (held in the old rx_skb) are copied into the
	 * new buffer and the old one is released.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7457 
7458 static void l2cap_recv_reset(struct l2cap_conn *conn)
7459 {
7460 	kfree_skb(conn->rx_skb);
7461 	conn->rx_skb = NULL;
7462 	conn->rx_len = 0;
7463 }
7464 
/* Entry point for ACL data from HCI: reassemble L2CAP frames from ACL
 * fragments.
 *
 * Start fragments (ACL_START*/ACL_COMPLETE) either carry a whole frame,
 * which is dispatched immediately, or open a reassembly buffer in
 * conn->rx_skb sized from the L2CAP length field (or conn->mtu when the
 * fragment is too short to even contain the length).  Continuation
 * fragments (ACL_CONT) are appended until conn->rx_len reaches zero.
 * The incoming skb is consumed on every path.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it and flag
		 * the link unreliable.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7571 
/* Callbacks registered with the HCI core for link lifecycle and
 * security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7578 
/* debugfs: print one line per registered channel (addresses and their
 * types, state, PSM, CIDs, MTUs, security level and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
7597 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7601 
7602 int __init l2cap_init(void)
7603 {
7604 	int err;
7605 
7606 	err = l2cap_init_sockets();
7607 	if (err < 0)
7608 		return err;
7609 
7610 	hci_register_cb(&l2cap_cb);
7611 
7612 	if (IS_ERR_OR_NULL(bt_debugfs))
7613 		return 0;
7614 
7615 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7616 					    NULL, &l2cap_debugfs_fops);
7617 
7618 	return 0;
7619 }
7620 
/* Module teardown: undo l2cap_init() in reverse order — remove the
 * debugfs entry, detach from the HCI core, then clean up sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7627 
/* Runtime-tunable module parameters (also writable via sysfs). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
</module_param comment */
7633