xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 28f65c11)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6 
7    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License version 2 as
11    published by the Free Software Foundation;
12 
13    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 
22    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24    SOFTWARE IS DISCLAIMED.
25 */
26 
27 /* Bluetooth L2CAP core. */
28 
29 #include <linux/module.h>
30 
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50 
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53 
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 
/* Module parameter: set non-zero to disable Enhanced Retransmission Mode */
int disable_ertm;

/* Features advertised in Information Responses (ERTM/streaming bits are
 * ORed in at query time when enabled) */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably marks the signalling
 * channel - confirm against the L2CAP spec */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue, presumably for deferred local-busy processing (see
 * l2cap_busy_work below) */
static struct workqueue_struct *_busy_wq;

/* All L2CAP channels in the system, protected by chan_list_lock */
LIST_HEAD(chan_list);
DEFINE_RWLOCK(chan_list_lock);

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78 {
79 	struct l2cap_chan *c;
80 
81 	list_for_each_entry(c, &conn->chan_l, list) {
82 		if (c->dcid == cid)
83 			return c;
84 	}
85 	return NULL;
86 
87 }
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->scid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 /* Find channel with given SCID.
101  * Returns locked socket */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	read_lock(&conn->chan_lock);
107 	c = __l2cap_get_chan_by_scid(conn, cid);
108 	if (c)
109 		bh_lock_sock(c->sk);
110 	read_unlock(&conn->chan_lock);
111 	return c;
112 }
113 
114 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 {
116 	struct l2cap_chan *c;
117 
118 	list_for_each_entry(c, &conn->chan_l, list) {
119 		if (c->ident == ident)
120 			return c;
121 	}
122 	return NULL;
123 }
124 
125 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 {
127 	struct l2cap_chan *c;
128 
129 	read_lock(&conn->chan_lock);
130 	c = __l2cap_get_chan_by_ident(conn, ident);
131 	if (c)
132 		bh_lock_sock(c->sk);
133 	read_unlock(&conn->chan_lock);
134 	return c;
135 }
136 
137 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
138 {
139 	struct l2cap_chan *c;
140 
141 	list_for_each_entry(c, &chan_list, global_l) {
142 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
143 			goto found;
144 	}
145 
146 	c = NULL;
147 found:
148 	return c;
149 }
150 
151 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
152 {
153 	int err;
154 
155 	write_lock_bh(&chan_list_lock);
156 
157 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
158 		err = -EADDRINUSE;
159 		goto done;
160 	}
161 
162 	if (psm) {
163 		chan->psm = psm;
164 		chan->sport = psm;
165 		err = 0;
166 	} else {
167 		u16 p;
168 
169 		err = -EINVAL;
170 		for (p = 0x1001; p < 0x1100; p += 2)
171 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
172 				chan->psm   = cpu_to_le16(p);
173 				chan->sport = cpu_to_le16(p);
174 				err = 0;
175 				break;
176 			}
177 	}
178 
179 done:
180 	write_unlock_bh(&chan_list_lock);
181 	return err;
182 }
183 
/* Record a fixed source CID for @chan.  Takes chan_list_lock like the
 * other channel-binding updaters; always returns 0. */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
194 
195 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
196 {
197 	u16 cid = L2CAP_CID_DYN_START;
198 
199 	for (; cid < L2CAP_CID_DYN_END; cid++) {
200 		if (!__l2cap_get_chan_by_scid(conn, cid))
201 			return cid;
202 	}
203 
204 	return 0;
205 }
206 
207 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
208 {
209 	struct l2cap_chan *chan;
210 
211 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
212 	if (!chan)
213 		return NULL;
214 
215 	chan->sk = sk;
216 
217 	write_lock_bh(&chan_list_lock);
218 	list_add(&chan->global_l, &chan_list);
219 	write_unlock_bh(&chan_list_lock);
220 
221 	return chan;
222 }
223 
/* Unlink @chan from the global channel list and free it. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	kfree(chan);
}
232 
233 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
234 {
235 	struct sock *sk = chan->sk;
236 
237 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
238 			chan->psm, chan->dcid);
239 
240 	conn->disc_reason = 0x13;
241 
242 	chan->conn = conn;
243 
244 	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
245 		if (conn->hcon->type == LE_LINK) {
246 			/* LE connection */
247 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
248 			chan->scid = L2CAP_CID_LE_DATA;
249 			chan->dcid = L2CAP_CID_LE_DATA;
250 		} else {
251 			/* Alloc CID for connection-oriented socket */
252 			chan->scid = l2cap_alloc_cid(conn);
253 			chan->omtu = L2CAP_DEFAULT_MTU;
254 		}
255 	} else if (sk->sk_type == SOCK_DGRAM) {
256 		/* Connectionless socket */
257 		chan->scid = L2CAP_CID_CONN_LESS;
258 		chan->dcid = L2CAP_CID_CONN_LESS;
259 		chan->omtu = L2CAP_DEFAULT_MTU;
260 	} else {
261 		/* Raw socket can send/recv signalling messages only */
262 		chan->scid = L2CAP_CID_SIGNALING;
263 		chan->dcid = L2CAP_CID_SIGNALING;
264 		chan->omtu = L2CAP_DEFAULT_MTU;
265 	}
266 
267 	sock_hold(sk);
268 
269 	list_add(&chan->list, &conn->chan_l);
270 }
271 
/* Delete channel.
 * Must be called on the locked socket.  Detaches the channel from its
 * connection, zaps the socket with @err and frees all ERTM state. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the socket reference taken in __l2cap_chan_add() */
		__sock_put(sk);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Still on a listener's accept queue: detach and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Channels that never finished configuration have no queued
	 * frames or ERTM machinery to tear down. */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* Free outstanding selective-reject bookkeeping */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
329 
330 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
331 {
332 	struct sock *sk = chan->sk;
333 
334 	if (sk->sk_type == SOCK_RAW) {
335 		switch (chan->sec_level) {
336 		case BT_SECURITY_HIGH:
337 			return HCI_AT_DEDICATED_BONDING_MITM;
338 		case BT_SECURITY_MEDIUM:
339 			return HCI_AT_DEDICATED_BONDING;
340 		default:
341 			return HCI_AT_NO_BONDING;
342 		}
343 	} else if (chan->psm == cpu_to_le16(0x0001)) {
344 		if (chan->sec_level == BT_SECURITY_LOW)
345 			chan->sec_level = BT_SECURITY_SDP;
346 
347 		if (chan->sec_level == BT_SECURITY_HIGH)
348 			return HCI_AT_NO_BONDING_MITM;
349 		else
350 			return HCI_AT_NO_BONDING;
351 	} else {
352 		switch (chan->sec_level) {
353 		case BT_SECURITY_HIGH:
354 			return HCI_AT_GENERAL_BONDING_MITM;
355 		case BT_SECURITY_MEDIUM:
356 			return HCI_AT_GENERAL_BONDING;
357 		default:
358 			return HCI_AT_NO_BONDING;
359 		}
360 	}
361 }
362 
363 /* Service level security */
364 static inline int l2cap_check_security(struct l2cap_chan *chan)
365 {
366 	struct l2cap_conn *conn = chan->conn;
367 	__u8 auth_type;
368 
369 	auth_type = l2cap_get_auth_type(chan);
370 
371 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
372 }
373 
374 u8 l2cap_get_ident(struct l2cap_conn *conn)
375 {
376 	u8 id;
377 
378 	/* Get next available identificator.
379 	 *    1 - 128 are used by kernel.
380 	 *  129 - 199 are reserved.
381 	 *  200 - 254 are used by utilities like l2ping, etc.
382 	 */
383 
384 	spin_lock_bh(&conn->lock);
385 
386 	if (++conn->tx_ident > 128)
387 		conn->tx_ident = 1;
388 
389 	id = conn->tx_ident;
390 
391 	spin_unlock_bh(&conn->lock);
392 
393 	return id;
394 }
395 
396 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
397 {
398 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
399 	u8 flags;
400 
401 	BT_DBG("code 0x%2.2x", code);
402 
403 	if (!skb)
404 		return;
405 
406 	if (lmp_no_flush_capable(conn->hcon->hdev))
407 		flags = ACL_START_NO_FLUSH;
408 	else
409 		flags = ACL_START;
410 
411 	hci_send_acl(conn->hcon, skb, flags);
412 }
413 
/* Build and transmit a single ERTM S-frame carrying @control, merging
 * in any pending Final/Poll bits and appending a CRC16 FCS when the
 * channel negotiated one. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	/* Room for the optional FCS trailer */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	/* NOTE(review): if conn->mtu < hlen, count is smaller than the
	 * number of bytes written into the skb below - presumably the
	 * ACL MTU always exceeds this tiny frame; confirm. */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending Final bit, if flagged */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending Poll bit, if flagged */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
466 
467 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
468 {
469 	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
470 		control |= L2CAP_SUPER_RCV_NOT_READY;
471 		chan->conn_state |= L2CAP_CONN_RNR_SENT;
472 	} else
473 		control |= L2CAP_SUPER_RCV_READY;
474 
475 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
476 
477 	l2cap_send_sframe(chan, control);
478 }
479 
480 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
481 {
482 	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
483 }
484 
485 static void l2cap_do_start(struct l2cap_chan *chan)
486 {
487 	struct l2cap_conn *conn = chan->conn;
488 
489 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
490 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
491 			return;
492 
493 		if (l2cap_check_security(chan) &&
494 				__l2cap_no_conn_pending(chan)) {
495 			struct l2cap_conn_req req;
496 			req.scid = cpu_to_le16(chan->scid);
497 			req.psm  = chan->psm;
498 
499 			chan->ident = l2cap_get_ident(conn);
500 			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
501 
502 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
503 							sizeof(req), &req);
504 		}
505 	} else {
506 		struct l2cap_info_req req;
507 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
508 
509 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
510 		conn->info_ident = l2cap_get_ident(conn);
511 
512 		mod_timer(&conn->info_timer, jiffies +
513 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
514 
515 		l2cap_send_cmd(conn, conn->info_ident,
516 					L2CAP_INFO_REQ, sizeof(req), &req);
517 	}
518 }
519 
520 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
521 {
522 	u32 local_feat_mask = l2cap_feat_mask;
523 	if (!disable_ertm)
524 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
525 
526 	switch (mode) {
527 	case L2CAP_MODE_ERTM:
528 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
529 	case L2CAP_MODE_STREAMING:
530 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
531 	default:
532 		return 0x00;
533 	}
534 }
535 
536 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
537 {
538 	struct sock *sk;
539 	struct l2cap_disconn_req req;
540 
541 	if (!conn)
542 		return;
543 
544 	sk = chan->sk;
545 
546 	if (chan->mode == L2CAP_MODE_ERTM) {
547 		del_timer(&chan->retrans_timer);
548 		del_timer(&chan->monitor_timer);
549 		del_timer(&chan->ack_timer);
550 	}
551 
552 	req.dcid = cpu_to_le16(chan->dcid);
553 	req.scid = cpu_to_le16(chan->scid);
554 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
555 			L2CAP_DISCONN_REQ, sizeof(req), &req);
556 
557 	sk->sk_state = BT_DISCONN;
558 	sk->sk_err = err;
559 }
560 
561 /* ---- L2CAP connections ---- */
562 static void l2cap_conn_start(struct l2cap_conn *conn)
563 {
564 	struct l2cap_chan *chan, *tmp;
565 
566 	BT_DBG("conn %p", conn);
567 
568 	read_lock(&conn->chan_lock);
569 
570 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
571 		struct sock *sk = chan->sk;
572 
573 		bh_lock_sock(sk);
574 
575 		if (sk->sk_type != SOCK_SEQPACKET &&
576 				sk->sk_type != SOCK_STREAM) {
577 			bh_unlock_sock(sk);
578 			continue;
579 		}
580 
581 		if (sk->sk_state == BT_CONNECT) {
582 			struct l2cap_conn_req req;
583 
584 			if (!l2cap_check_security(chan) ||
585 					!__l2cap_no_conn_pending(chan)) {
586 				bh_unlock_sock(sk);
587 				continue;
588 			}
589 
590 			if (!l2cap_mode_supported(chan->mode,
591 					conn->feat_mask)
592 					&& chan->conf_state &
593 					L2CAP_CONF_STATE2_DEVICE) {
594 				/* __l2cap_sock_close() calls list_del(chan)
595 				 * so release the lock */
596 				read_unlock_bh(&conn->chan_lock);
597 				 __l2cap_sock_close(sk, ECONNRESET);
598 				read_lock_bh(&conn->chan_lock);
599 				bh_unlock_sock(sk);
600 				continue;
601 			}
602 
603 			req.scid = cpu_to_le16(chan->scid);
604 			req.psm  = chan->psm;
605 
606 			chan->ident = l2cap_get_ident(conn);
607 			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
608 
609 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
610 							sizeof(req), &req);
611 
612 		} else if (sk->sk_state == BT_CONNECT2) {
613 			struct l2cap_conn_rsp rsp;
614 			char buf[128];
615 			rsp.scid = cpu_to_le16(chan->dcid);
616 			rsp.dcid = cpu_to_le16(chan->scid);
617 
618 			if (l2cap_check_security(chan)) {
619 				if (bt_sk(sk)->defer_setup) {
620 					struct sock *parent = bt_sk(sk)->parent;
621 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
622 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
623 					parent->sk_data_ready(parent, 0);
624 
625 				} else {
626 					sk->sk_state = BT_CONFIG;
627 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
628 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
629 				}
630 			} else {
631 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
632 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
633 			}
634 
635 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
636 							sizeof(rsp), &rsp);
637 
638 			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
639 					rsp.result != L2CAP_CR_SUCCESS) {
640 				bh_unlock_sock(sk);
641 				continue;
642 			}
643 
644 			chan->conf_state |= L2CAP_CONF_REQ_SENT;
645 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
646 						l2cap_build_conf_req(chan, buf), buf);
647 			chan->num_conf_req++;
648 		}
649 
650 		bh_unlock_sock(sk);
651 	}
652 
653 	read_unlock(&conn->chan_lock);
654 }
655 
656 /* Find socket with cid and source bdaddr.
657  * Returns closest match, locked.
658  */
659 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
660 {
661 	struct l2cap_chan *c, *c1 = NULL;
662 
663 	read_lock(&chan_list_lock);
664 
665 	list_for_each_entry(c, &chan_list, global_l) {
666 		struct sock *sk = c->sk;
667 
668 		if (state && sk->sk_state != state)
669 			continue;
670 
671 		if (c->scid == cid) {
672 			/* Exact match. */
673 			if (!bacmp(&bt_sk(sk)->src, src)) {
674 				read_unlock(&chan_list_lock);
675 				return c;
676 			}
677 
678 			/* Closest match */
679 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
680 				c1 = c;
681 		}
682 	}
683 
684 	read_unlock(&chan_list_lock);
685 
686 	return c1;
687 }
688 
/* Handle a new incoming LE link: if a socket is listening on the LE
 * data CID, spawn a child socket/channel for it and mark it connected. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket */
	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

	/* The success path falls through: "clean" only unlocks parent */
clean:
	bh_unlock_sock(parent);
}
747 
/* Called once the underlying HCI link is up: complete LE channels
 * immediately and kick off L2CAP channel setup for ACL channels. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* An incoming LE link may first need a child socket created */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* LE links have no L2CAP connect/configure phase */
		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		/* Connectionless/raw sockets are ready as soon as the
		 * link is; connection-oriented ones start channel setup. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
783 
784 /* Notify sockets that we cannot guaranty reliability anymore */
785 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
786 {
787 	struct l2cap_chan *chan;
788 
789 	BT_DBG("conn %p", conn);
790 
791 	read_lock(&conn->chan_lock);
792 
793 	list_for_each_entry(chan, &conn->chan_l, list) {
794 		struct sock *sk = chan->sk;
795 
796 		if (chan->force_reliable)
797 			sk->sk_err = err;
798 	}
799 
800 	read_unlock(&conn->chan_lock);
801 }
802 
/* Information Request timed out: mark feature discovery as finished
 * anyway so pending channels can proceed with setup. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
812 
/* Allocate and initialise the l2cap_conn for @hcon.  Returns the
 * existing connection if one is already attached, the existing value
 * (possibly NULL) when @status is non-zero, or NULL on OOM. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links use the controller's LE MTU when it advertises one */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links have no information-request phase, hence no timer */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: presumably "remote user terminated connection" - confirm */
	conn->disc_reason = 0x13;

	return conn;
}
852 
/* Tear down the L2CAP state attached to @hcon, deleting every channel
 * with error @err and freeing the connection. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled frame, if any */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* The info timer is only ever armed after a feature request */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
881 
/* Locked wrapper around __l2cap_chan_add() */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
888 
889 /* ---- Socket interface ---- */
890 
891 /* Find socket with psm and source bdaddr.
892  * Returns closest match.
893  */
894 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
895 {
896 	struct l2cap_chan *c, *c1 = NULL;
897 
898 	read_lock(&chan_list_lock);
899 
900 	list_for_each_entry(c, &chan_list, global_l) {
901 		struct sock *sk = c->sk;
902 
903 		if (state && sk->sk_state != state)
904 			continue;
905 
906 		if (c->psm == psm) {
907 			/* Exact match. */
908 			if (!bacmp(&bt_sk(sk)->src, src)) {
909 				read_unlock(&chan_list_lock);
910 				return c;
911 			}
912 
913 			/* Closest match */
914 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
915 				c1 = c;
916 		}
917 	}
918 
919 	read_unlock(&chan_list_lock);
920 
921 	return c1;
922 }
923 
/* Initiate an outgoing connection for @chan: resolve the route, create
 * (or reuse) the ACL/LE link and begin channel setup.
 * Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link instead of ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* The link may already be up (reused connection): proceed with
	 * channel setup straight away. */
	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(chan))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
990 
/* Block (interruptibly) until all transmitted ERTM frames have been
 * acknowledged or the connection drops.  Called with the socket locked;
 * the lock is released around each timed wait.  Returns 0 or a
 * negative errno (signal or socket error). */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after a full timeout */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1022 
1023 static void l2cap_monitor_timeout(unsigned long arg)
1024 {
1025 	struct l2cap_chan *chan = (void *) arg;
1026 	struct sock *sk = chan->sk;
1027 
1028 	BT_DBG("chan %p", chan);
1029 
1030 	bh_lock_sock(sk);
1031 	if (chan->retry_count >= chan->remote_max_tx) {
1032 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1033 		bh_unlock_sock(sk);
1034 		return;
1035 	}
1036 
1037 	chan->retry_count++;
1038 	__mod_monitor_timer();
1039 
1040 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1041 	bh_unlock_sock(sk);
1042 }
1043 
1044 static void l2cap_retrans_timeout(unsigned long arg)
1045 {
1046 	struct l2cap_chan *chan = (void *) arg;
1047 	struct sock *sk = chan->sk;
1048 
1049 	BT_DBG("chan %p", chan);
1050 
1051 	bh_lock_sock(sk);
1052 	chan->retry_count = 1;
1053 	__mod_monitor_timer();
1054 
1055 	chan->conn_state |= L2CAP_CONN_WAIT_F;
1056 
1057 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1058 	bh_unlock_sock(sk);
1059 }
1060 
1061 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1062 {
1063 	struct sk_buff *skb;
1064 
1065 	while ((skb = skb_peek(&chan->tx_q)) &&
1066 			chan->unacked_frames) {
1067 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1068 			break;
1069 
1070 		skb = skb_dequeue(&chan->tx_q);
1071 		kfree_skb(skb);
1072 
1073 		chan->unacked_frames--;
1074 	}
1075 
1076 	if (!chan->unacked_frames)
1077 		del_timer(&chan->retrans_timer);
1078 }
1079 
1080 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1081 {
1082 	struct hci_conn *hcon = chan->conn->hcon;
1083 	u16 flags;
1084 
1085 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1086 
1087 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1088 		flags = ACL_START_NO_FLUSH;
1089 	else
1090 		flags = ACL_START;
1091 
1092 	hci_send_acl(hcon, skb, flags);
1093 }
1094 
1095 void l2cap_streaming_send(struct l2cap_chan *chan)
1096 {
1097 	struct sk_buff *skb;
1098 	u16 control, fcs;
1099 
1100 	while ((skb = skb_dequeue(&chan->tx_q))) {
1101 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1102 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1103 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1104 
1105 		if (chan->fcs == L2CAP_FCS_CRC16) {
1106 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1107 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1108 		}
1109 
1110 		l2cap_do_send(chan, skb);
1111 
1112 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1113 	}
1114 }
1115 
1116 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1117 {
1118 	struct sk_buff *skb, *tx_skb;
1119 	u16 control, fcs;
1120 
1121 	skb = skb_peek(&chan->tx_q);
1122 	if (!skb)
1123 		return;
1124 
1125 	do {
1126 		if (bt_cb(skb)->tx_seq == tx_seq)
1127 			break;
1128 
1129 		if (skb_queue_is_last(&chan->tx_q, skb))
1130 			return;
1131 
1132 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1133 
1134 	if (chan->remote_max_tx &&
1135 			bt_cb(skb)->retries == chan->remote_max_tx) {
1136 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1137 		return;
1138 	}
1139 
1140 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1141 	bt_cb(skb)->retries++;
1142 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1143 	control &= L2CAP_CTRL_SAR;
1144 
1145 	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1146 		control |= L2CAP_CTRL_FINAL;
1147 		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1148 	}
1149 
1150 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1151 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1152 
1153 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1154 
1155 	if (chan->fcs == L2CAP_FCS_CRC16) {
1156 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1157 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1158 	}
1159 
1160 	l2cap_do_send(chan, tx_skb);
1161 }
1162 
1163 int l2cap_ertm_send(struct l2cap_chan *chan)
1164 {
1165 	struct sk_buff *skb, *tx_skb;
1166 	struct sock *sk = chan->sk;
1167 	u16 control, fcs;
1168 	int nsent = 0;
1169 
1170 	if (sk->sk_state != BT_CONNECTED)
1171 		return -ENOTCONN;
1172 
1173 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1174 
1175 		if (chan->remote_max_tx &&
1176 				bt_cb(skb)->retries == chan->remote_max_tx) {
1177 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1178 			break;
1179 		}
1180 
1181 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1182 
1183 		bt_cb(skb)->retries++;
1184 
1185 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1186 		control &= L2CAP_CTRL_SAR;
1187 
1188 		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1189 			control |= L2CAP_CTRL_FINAL;
1190 			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1191 		}
1192 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1193 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1194 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1195 
1196 
1197 		if (chan->fcs == L2CAP_FCS_CRC16) {
1198 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1199 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1200 		}
1201 
1202 		l2cap_do_send(chan, tx_skb);
1203 
1204 		__mod_retrans_timer();
1205 
1206 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1207 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1208 
1209 		if (bt_cb(skb)->retries == 1)
1210 			chan->unacked_frames++;
1211 
1212 		chan->frames_sent++;
1213 
1214 		if (skb_queue_is_last(&chan->tx_q, skb))
1215 			chan->tx_send_head = NULL;
1216 		else
1217 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1218 
1219 		nsent++;
1220 	}
1221 
1222 	return nsent;
1223 }
1224 
1225 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1226 {
1227 	int ret;
1228 
1229 	if (!skb_queue_empty(&chan->tx_q))
1230 		chan->tx_send_head = chan->tx_q.next;
1231 
1232 	chan->next_tx_seq = chan->expected_ack_seq;
1233 	ret = l2cap_ertm_send(chan);
1234 	return ret;
1235 }
1236 
1237 static void l2cap_send_ack(struct l2cap_chan *chan)
1238 {
1239 	u16 control = 0;
1240 
1241 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1242 
1243 	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1244 		control |= L2CAP_SUPER_RCV_NOT_READY;
1245 		chan->conn_state |= L2CAP_CONN_RNR_SENT;
1246 		l2cap_send_sframe(chan, control);
1247 		return;
1248 	}
1249 
1250 	if (l2cap_ertm_send(chan) > 0)
1251 		return;
1252 
1253 	control |= L2CAP_SUPER_RCV_READY;
1254 	l2cap_send_sframe(chan, control);
1255 }
1256 
1257 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1258 {
1259 	struct srej_list *tail;
1260 	u16 control;
1261 
1262 	control = L2CAP_SUPER_SELECT_REJECT;
1263 	control |= L2CAP_CTRL_FINAL;
1264 
1265 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1266 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1267 
1268 	l2cap_send_sframe(chan, control);
1269 }
1270 
/* Copy user data from msg into skb, spilling anything beyond the first
 * fragment into a chain of continuation skbs on skb's frag_list, each
 * at most conn->mtu bytes (continuations carry no L2CAP header).
 *
 * @count: number of bytes to copy into skb itself (caller sized skb).
 * Returns total bytes copied, or a negative errno.  On error the
 * caller is expected to kfree_skb(skb), which also frees any
 * continuation fragments already linked onto frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Advance to the ->next pointer of the fragment just added */
		frag = &(*frag)->next;
	}

	return sent;
}
1302 
1303 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1304 {
1305 	struct sock *sk = chan->sk;
1306 	struct l2cap_conn *conn = chan->conn;
1307 	struct sk_buff *skb;
1308 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1309 	struct l2cap_hdr *lh;
1310 
1311 	BT_DBG("sk %p len %d", sk, (int)len);
1312 
1313 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1314 	skb = bt_skb_send_alloc(sk, count + hlen,
1315 			msg->msg_flags & MSG_DONTWAIT, &err);
1316 	if (!skb)
1317 		return ERR_PTR(err);
1318 
1319 	/* Create L2CAP header */
1320 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1321 	lh->cid = cpu_to_le16(chan->dcid);
1322 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1323 	put_unaligned_le16(chan->psm, skb_put(skb, 2));
1324 
1325 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1326 	if (unlikely(err < 0)) {
1327 		kfree_skb(skb);
1328 		return ERR_PTR(err);
1329 	}
1330 	return skb;
1331 }
1332 
1333 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1334 {
1335 	struct sock *sk = chan->sk;
1336 	struct l2cap_conn *conn = chan->conn;
1337 	struct sk_buff *skb;
1338 	int err, count, hlen = L2CAP_HDR_SIZE;
1339 	struct l2cap_hdr *lh;
1340 
1341 	BT_DBG("sk %p len %d", sk, (int)len);
1342 
1343 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1344 	skb = bt_skb_send_alloc(sk, count + hlen,
1345 			msg->msg_flags & MSG_DONTWAIT, &err);
1346 	if (!skb)
1347 		return ERR_PTR(err);
1348 
1349 	/* Create L2CAP header */
1350 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1351 	lh->cid = cpu_to_le16(chan->dcid);
1352 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1353 
1354 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1355 	if (unlikely(err < 0)) {
1356 		kfree_skb(skb);
1357 		return ERR_PTR(err);
1358 	}
1359 	return skb;
1360 }
1361 
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 2-byte control field, optional 2-byte SDU
 * length (only on the first segment of a segmented SDU, i.e. when
 * @sdulen != 0), payload, and an optional 2-byte FCS placeholder.
 * The FCS value itself is filled in at transmit time; here only the
 * space is reserved (written as 0).
 *
 * Returns the skb with bt_cb(skb)->retries reset, or an ERR_PTR.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SDU length field is present only on a start segment */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS when negotiated */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers control + (sdulen) + payload + (fcs) */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder; real CRC is computed just before transmission */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1407 
/* Segment an SDU larger than the remote MPS into a chain of I-frames
 * (start / continue / end segments) and append them to the channel's
 * transmit queue.
 *
 * Builds all segments into a private queue first so that a mid-way
 * allocation failure leaves chan->tx_q untouched.  Returns the total
 * number of payload bytes queued, or a negative errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* Start segment carries the total SDU length */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop all segments built so far; nothing was queued
			 * on the channel yet */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* If nothing was pending, transmission starts at our first segment */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1452 
1453 static void l2cap_chan_ready(struct sock *sk)
1454 {
1455 	struct sock *parent = bt_sk(sk)->parent;
1456 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1457 
1458 	BT_DBG("sk %p, parent %p", sk, parent);
1459 
1460 	chan->conf_state = 0;
1461 	l2cap_sock_clear_timer(sk);
1462 
1463 	if (!parent) {
1464 		/* Outgoing channel.
1465 		 * Wake up socket sleeping on connect.
1466 		 */
1467 		sk->sk_state = BT_CONNECTED;
1468 		sk->sk_state_change(sk);
1469 	} else {
1470 		/* Incoming channel.
1471 		 * Wake up socket sleeping on accept.
1472 		 */
1473 		parent->sk_data_ready(parent, 0);
1474 	}
1475 }
1476 
1477 /* Copy frame to all raw sockets on that connection */
1478 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1479 {
1480 	struct sk_buff *nskb;
1481 	struct l2cap_chan *chan;
1482 
1483 	BT_DBG("conn %p", conn);
1484 
1485 	read_lock(&conn->chan_lock);
1486 	list_for_each_entry(chan, &conn->chan_l, list) {
1487 		struct sock *sk = chan->sk;
1488 		if (sk->sk_type != SOCK_RAW)
1489 			continue;
1490 
1491 		/* Don't send frame to the socket it came from */
1492 		if (skb->sk == sk)
1493 			continue;
1494 		nskb = skb_clone(skb, GFP_ATOMIC);
1495 		if (!nskb)
1496 			continue;
1497 
1498 		if (sock_queue_rcv_skb(sk, nskb))
1499 			kfree_skb(nskb);
1500 	}
1501 	read_unlock(&conn->chan_lock);
1502 }
1503 
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying a signalling command: L2CAP header,
 * command header (code/ident/dlen) and @dlen bytes of @data.  Payload
 * exceeding conn->mtu is spread over continuation fragments chained on
 * frag_list.  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Remaining room in the first fragment after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to be copied into continuation fragments */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any continuation fragments already chained as well */
	kfree_skb(skb);
	return NULL;
}
1567 
/* Parse one configuration option at *ptr, returning its type, payload
 * length and value, and advance *ptr past it.
 *
 * 1/2/4-byte options are returned by value in *val; for any other
 * length *val holds a pointer to the option payload instead (the
 * caller must know the expected size, e.g. for RFC options).
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer to the payload */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1600 
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Mirror of l2cap_get_conf_opt(): 1/2/4-byte values are
 * stored inline in little-endian order; for any other length @val is
 * treated as a pointer to @len bytes to copy (e.g. an RFC struct).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-size option: val is a pointer to the payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1630 
1631 static void l2cap_ack_timeout(unsigned long arg)
1632 {
1633 	struct l2cap_chan *chan = (void *) arg;
1634 
1635 	bh_lock_sock(chan->sk);
1636 	l2cap_send_ack(chan);
1637 	bh_unlock_sock(chan->sk);
1638 }
1639 
/* Initialise the per-channel ERTM state: sequence counters, the
 * retransmission/monitor/ack timers, the SREJ queues and the busy
 * worker, and install the ERTM backlog receive handler.
 * Called once when the channel enters BT_CONNECTED in ERTM mode.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	/* Out-of-order frames held for SREJ recovery / local-busy handling */
	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* Frames arriving while the socket is owned by user context are
	 * processed through the ERTM state machine via the backlog */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1665 
1666 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1667 {
1668 	switch (mode) {
1669 	case L2CAP_MODE_STREAMING:
1670 	case L2CAP_MODE_ERTM:
1671 		if (l2cap_mode_supported(mode, remote_feat_mask))
1672 			return mode;
1673 		/* fall through */
1674 	default:
1675 		return L2CAP_MODE_BASIC;
1676 	}
1677 }
1678 
/* Build an outgoing configuration request for @chan into @data.
 *
 * On the first request (no prior req/rsp exchanged) the channel mode
 * may be downgraded based on the remote feature mask, unless the mode
 * was mandated by the user (L2CAP_CONF_STATE2_DEVICE).  Then the MTU,
 * RFC and FCS options are appended as appropriate for the final mode.
 * Returns the number of bytes written (request header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* User-mandated mode: never downgrade */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* RFC option is meaningless if the peer knows neither
		 * ERTM nor streaming mode */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap PDU size so a whole I-frame (incl. headers/FCS)
		 * fits in the HCI MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request FCS off if we or the peer don't want it */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1775 
1776 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1777 {
1778 	struct l2cap_conf_rsp *rsp = data;
1779 	void *ptr = rsp->data;
1780 	void *req = chan->conf_req;
1781 	int len = chan->conf_len;
1782 	int type, hint, olen;
1783 	unsigned long val;
1784 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1785 	u16 mtu = L2CAP_DEFAULT_MTU;
1786 	u16 result = L2CAP_CONF_SUCCESS;
1787 
1788 	BT_DBG("chan %p", chan);
1789 
1790 	while (len >= L2CAP_CONF_OPT_SIZE) {
1791 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1792 
1793 		hint  = type & L2CAP_CONF_HINT;
1794 		type &= L2CAP_CONF_MASK;
1795 
1796 		switch (type) {
1797 		case L2CAP_CONF_MTU:
1798 			mtu = val;
1799 			break;
1800 
1801 		case L2CAP_CONF_FLUSH_TO:
1802 			chan->flush_to = val;
1803 			break;
1804 
1805 		case L2CAP_CONF_QOS:
1806 			break;
1807 
1808 		case L2CAP_CONF_RFC:
1809 			if (olen == sizeof(rfc))
1810 				memcpy(&rfc, (void *) val, olen);
1811 			break;
1812 
1813 		case L2CAP_CONF_FCS:
1814 			if (val == L2CAP_FCS_NONE)
1815 				chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1816 
1817 			break;
1818 
1819 		default:
1820 			if (hint)
1821 				break;
1822 
1823 			result = L2CAP_CONF_UNKNOWN;
1824 			*((u8 *) ptr++) = type;
1825 			break;
1826 		}
1827 	}
1828 
1829 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
1830 		goto done;
1831 
1832 	switch (chan->mode) {
1833 	case L2CAP_MODE_STREAMING:
1834 	case L2CAP_MODE_ERTM:
1835 		if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1836 			chan->mode = l2cap_select_mode(rfc.mode,
1837 					chan->conn->feat_mask);
1838 			break;
1839 		}
1840 
1841 		if (chan->mode != rfc.mode)
1842 			return -ECONNREFUSED;
1843 
1844 		break;
1845 	}
1846 
1847 done:
1848 	if (chan->mode != rfc.mode) {
1849 		result = L2CAP_CONF_UNACCEPT;
1850 		rfc.mode = chan->mode;
1851 
1852 		if (chan->num_conf_rsp == 1)
1853 			return -ECONNREFUSED;
1854 
1855 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1856 					sizeof(rfc), (unsigned long) &rfc);
1857 	}
1858 
1859 
1860 	if (result == L2CAP_CONF_SUCCESS) {
1861 		/* Configure output options and let the other side know
1862 		 * which ones we don't like. */
1863 
1864 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
1865 			result = L2CAP_CONF_UNACCEPT;
1866 		else {
1867 			chan->omtu = mtu;
1868 			chan->conf_state |= L2CAP_CONF_MTU_DONE;
1869 		}
1870 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1871 
1872 		switch (rfc.mode) {
1873 		case L2CAP_MODE_BASIC:
1874 			chan->fcs = L2CAP_FCS_NONE;
1875 			chan->conf_state |= L2CAP_CONF_MODE_DONE;
1876 			break;
1877 
1878 		case L2CAP_MODE_ERTM:
1879 			chan->remote_tx_win = rfc.txwin_size;
1880 			chan->remote_max_tx = rfc.max_transmit;
1881 
1882 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1883 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1884 
1885 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1886 
1887 			rfc.retrans_timeout =
1888 				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1889 			rfc.monitor_timeout =
1890 				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1891 
1892 			chan->conf_state |= L2CAP_CONF_MODE_DONE;
1893 
1894 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1895 					sizeof(rfc), (unsigned long) &rfc);
1896 
1897 			break;
1898 
1899 		case L2CAP_MODE_STREAMING:
1900 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1901 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1902 
1903 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1904 
1905 			chan->conf_state |= L2CAP_CONF_MODE_DONE;
1906 
1907 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1908 					sizeof(rfc), (unsigned long) &rfc);
1909 
1910 			break;
1911 
1912 		default:
1913 			result = L2CAP_CONF_UNACCEPT;
1914 
1915 			memset(&rfc, 0, sizeof(rfc));
1916 			rfc.mode = chan->mode;
1917 		}
1918 
1919 		if (result == L2CAP_CONF_SUCCESS)
1920 			chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1921 	}
1922 	rsp->scid   = cpu_to_le16(chan->dcid);
1923 	rsp->result = cpu_to_le16(result);
1924 	rsp->flags  = cpu_to_le16(0x0000);
1925 
1926 	return ptr - data;
1927 }
1928 
1929 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1930 {
1931 	struct l2cap_conf_req *req = data;
1932 	void *ptr = req->data;
1933 	int type, olen;
1934 	unsigned long val;
1935 	struct l2cap_conf_rfc rfc;
1936 
1937 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1938 
1939 	while (len >= L2CAP_CONF_OPT_SIZE) {
1940 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1941 
1942 		switch (type) {
1943 		case L2CAP_CONF_MTU:
1944 			if (val < L2CAP_DEFAULT_MIN_MTU) {
1945 				*result = L2CAP_CONF_UNACCEPT;
1946 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1947 			} else
1948 				chan->imtu = val;
1949 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1950 			break;
1951 
1952 		case L2CAP_CONF_FLUSH_TO:
1953 			chan->flush_to = val;
1954 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1955 							2, chan->flush_to);
1956 			break;
1957 
1958 		case L2CAP_CONF_RFC:
1959 			if (olen == sizeof(rfc))
1960 				memcpy(&rfc, (void *)val, olen);
1961 
1962 			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1963 							rfc.mode != chan->mode)
1964 				return -ECONNREFUSED;
1965 
1966 			chan->fcs = 0;
1967 
1968 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1969 					sizeof(rfc), (unsigned long) &rfc);
1970 			break;
1971 		}
1972 	}
1973 
1974 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1975 		return -ECONNREFUSED;
1976 
1977 	chan->mode = rfc.mode;
1978 
1979 	if (*result == L2CAP_CONF_SUCCESS) {
1980 		switch (rfc.mode) {
1981 		case L2CAP_MODE_ERTM:
1982 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1983 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1984 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
1985 			break;
1986 		case L2CAP_MODE_STREAMING:
1987 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
1988 		}
1989 	}
1990 
1991 	req->dcid   = cpu_to_le16(chan->dcid);
1992 	req->flags  = cpu_to_le16(0x0000);
1993 
1994 	return ptr - data;
1995 }
1996 
1997 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1998 {
1999 	struct l2cap_conf_rsp *rsp = data;
2000 	void *ptr = rsp->data;
2001 
2002 	BT_DBG("chan %p", chan);
2003 
2004 	rsp->scid   = cpu_to_le16(chan->dcid);
2005 	rsp->result = cpu_to_le16(result);
2006 	rsp->flags  = cpu_to_le16(flags);
2007 
2008 	return ptr - data;
2009 }
2010 
/* Send the deferred connect response (success) for a channel that was
 * held in BT_CONNECT2 by defer_setup, then kick off configuration with
 * our first config request unless one is already outstanding.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* Reuse the ident of the original connect request */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	chan->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2032 
2033 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2034 {
2035 	int type, olen;
2036 	unsigned long val;
2037 	struct l2cap_conf_rfc rfc;
2038 
2039 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2040 
2041 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2042 		return;
2043 
2044 	while (len >= L2CAP_CONF_OPT_SIZE) {
2045 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2046 
2047 		switch (type) {
2048 		case L2CAP_CONF_RFC:
2049 			if (olen == sizeof(rfc))
2050 				memcpy(&rfc, (void *)val, olen);
2051 			goto done;
2052 		}
2053 	}
2054 
2055 done:
2056 	switch (rfc.mode) {
2057 	case L2CAP_MODE_ERTM:
2058 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2059 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2060 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2061 		break;
2062 	case L2CAP_MODE_STREAMING:
2063 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2064 	}
2065 }
2066 
2067 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2068 {
2069 	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2070 
2071 	if (rej->reason != 0x0000)
2072 		return 0;
2073 
2074 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2075 					cmd->ident == conn->info_ident) {
2076 		del_timer(&conn->info_timer);
2077 
2078 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2079 		conn->info_ident = 0;
2080 
2081 		l2cap_conn_start(conn);
2082 	}
2083 
2084 	return 0;
2085 }
2086 
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, performs the
 * security/backlog checks, allocates a child socket+channel, and sends
 * a Connection Response (success, pending, or an error code).  When
 * the remote's feature mask is not yet known, the response is PEND and
 * an Information Request is issued first.
 * Always returns 0 (a response is sent even on failure).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		/* Mark the new socket dead before killing it */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our local CID, reported back to the peer as its dcid */
	dcid = chan->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides via accept(); answer
				 * PEND until then */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still pending on the link */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote features unknown yet; query them first */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2227 
/* Handle an incoming Connection Response for one of our outgoing
 * connect requests.  On success the channel moves to BT_CONFIG and we
 * send our configuration request; on PEND we just note the state; any
 * other result tears the channel down.
 *
 * NOTE(review): the bh_unlock_sock() at the end pairs with a lock
 * taken inside l2cap_get_chan_by_scid()/l2cap_get_chan_by_ident() —
 * not visible in this file chunk; confirm against those helpers.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* scid == 0 can happen for early (pending) responses; fall back
	 * to matching by command ident */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second config request if one is in flight */
		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* Retry teardown shortly once user releases the sock */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2292 
2293 static inline void set_default_fcs(struct l2cap_chan *chan)
2294 {
2295 	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2296 
2297 	/* FCS is enabled only in ERTM or streaming mode, if one or both
2298 	 * sides request it.
2299 	 */
2300 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2301 		chan->fcs = L2CAP_FCS_NONE;
2302 	else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2303 		chan->fcs = L2CAP_FCS_CRC16;
2304 }
2305 
2306 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2307 {
2308 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2309 	u16 dcid, flags;
2310 	u8 rsp[64];
2311 	struct l2cap_chan *chan;
2312 	struct sock *sk;
2313 	int len;
2314 
2315 	dcid  = __le16_to_cpu(req->dcid);
2316 	flags = __le16_to_cpu(req->flags);
2317 
2318 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2319 
2320 	chan = l2cap_get_chan_by_scid(conn, dcid);
2321 	if (!chan)
2322 		return -ENOENT;
2323 
2324 	sk = chan->sk;
2325 
2326 	if (sk->sk_state != BT_CONFIG) {
2327 		struct l2cap_cmd_rej rej;
2328 
2329 		rej.reason = cpu_to_le16(0x0002);
2330 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2331 				sizeof(rej), &rej);
2332 		goto unlock;
2333 	}
2334 
2335 	/* Reject if config buffer is too small. */
2336 	len = cmd_len - sizeof(*req);
2337 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
2338 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2339 				l2cap_build_conf_rsp(chan, rsp,
2340 					L2CAP_CONF_REJECT, flags), rsp);
2341 		goto unlock;
2342 	}
2343 
2344 	/* Store config. */
2345 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
2346 	chan->conf_len += len;
2347 
2348 	if (flags & 0x0001) {
2349 		/* Incomplete config. Send empty response. */
2350 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2351 				l2cap_build_conf_rsp(chan, rsp,
2352 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
2353 		goto unlock;
2354 	}
2355 
2356 	/* Complete config. */
2357 	len = l2cap_parse_conf_req(chan, rsp);
2358 	if (len < 0) {
2359 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2360 		goto unlock;
2361 	}
2362 
2363 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2364 	chan->num_conf_rsp++;
2365 
2366 	/* Reset config buffer. */
2367 	chan->conf_len = 0;
2368 
2369 	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
2370 		goto unlock;
2371 
2372 	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2373 		set_default_fcs(chan);
2374 
2375 		sk->sk_state = BT_CONNECTED;
2376 
2377 		chan->next_tx_seq = 0;
2378 		chan->expected_tx_seq = 0;
2379 		skb_queue_head_init(&chan->tx_q);
2380 		if (chan->mode == L2CAP_MODE_ERTM)
2381 			l2cap_ertm_init(chan);
2382 
2383 		l2cap_chan_ready(sk);
2384 		goto unlock;
2385 	}
2386 
2387 	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2388 		u8 buf[64];
2389 		chan->conf_state |= L2CAP_CONF_REQ_SENT;
2390 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2391 					l2cap_build_conf_req(chan, buf), buf);
2392 		chan->num_conf_req++;
2393 	}
2394 
2395 unlock:
2396 	bh_unlock_sock(sk);
2397 	return 0;
2398 }
2399 
2400 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2401 {
2402 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2403 	u16 scid, flags, result;
2404 	struct l2cap_chan *chan;
2405 	struct sock *sk;
2406 	int len = cmd->len - sizeof(*rsp);
2407 
2408 	scid   = __le16_to_cpu(rsp->scid);
2409 	flags  = __le16_to_cpu(rsp->flags);
2410 	result = __le16_to_cpu(rsp->result);
2411 
2412 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2413 			scid, flags, result);
2414 
2415 	chan = l2cap_get_chan_by_scid(conn, scid);
2416 	if (!chan)
2417 		return 0;
2418 
2419 	sk = chan->sk;
2420 
2421 	switch (result) {
2422 	case L2CAP_CONF_SUCCESS:
2423 		l2cap_conf_rfc_get(chan, rsp->data, len);
2424 		break;
2425 
2426 	case L2CAP_CONF_UNACCEPT:
2427 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2428 			char req[64];
2429 
2430 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2431 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2432 				goto done;
2433 			}
2434 
2435 			/* throw out any old stored conf requests */
2436 			result = L2CAP_CONF_SUCCESS;
2437 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2438 								req, &result);
2439 			if (len < 0) {
2440 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2441 				goto done;
2442 			}
2443 
2444 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2445 						L2CAP_CONF_REQ, len, req);
2446 			chan->num_conf_req++;
2447 			if (result != L2CAP_CONF_SUCCESS)
2448 				goto done;
2449 			break;
2450 		}
2451 
2452 	default:
2453 		sk->sk_err = ECONNRESET;
2454 		l2cap_sock_set_timer(sk, HZ * 5);
2455 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2456 		goto done;
2457 	}
2458 
2459 	if (flags & 0x01)
2460 		goto done;
2461 
2462 	chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2463 
2464 	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2465 		set_default_fcs(chan);
2466 
2467 		sk->sk_state = BT_CONNECTED;
2468 		chan->next_tx_seq = 0;
2469 		chan->expected_tx_seq = 0;
2470 		skb_queue_head_init(&chan->tx_q);
2471 		if (chan->mode ==  L2CAP_MODE_ERTM)
2472 			l2cap_ertm_init(chan);
2473 
2474 		l2cap_chan_ready(sk);
2475 	}
2476 
2477 done:
2478 	bh_unlock_sock(sk);
2479 	return 0;
2480 }
2481 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the matching channel down.  Unknown
 * CIDs are silently ignored (return 0 either way).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid.  The lookup evidently returns with
	 * the socket bh-locked — we unlock it on every exit path below. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the identifiers back from the peer's point of view. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer the teardown: mark the socket disconnecting and
		 * let the (short) timer retry once the user releases it. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2522 
/* Handle an incoming L2CAP Disconnection Response: our earlier
 * Disconnection Request was answered, so finish deleting the channel.
 * Unknown CIDs are silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup evidently returns with the socket bh-locked — unlocked
	 * on every exit path below. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer: retry from the timer once the user lets go. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a locally initiated, orderly disconnect. */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2556 
2557 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2558 {
2559 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2560 	u16 type;
2561 
2562 	type = __le16_to_cpu(req->type);
2563 
2564 	BT_DBG("type 0x%4.4x", type);
2565 
2566 	if (type == L2CAP_IT_FEAT_MASK) {
2567 		u8 buf[8];
2568 		u32 feat_mask = l2cap_feat_mask;
2569 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2570 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2571 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2572 		if (!disable_ertm)
2573 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2574 							 | L2CAP_FEAT_FCS;
2575 		put_unaligned_le32(feat_mask, rsp->data);
2576 		l2cap_send_cmd(conn, cmd->ident,
2577 					L2CAP_INFO_RSP, sizeof(buf), buf);
2578 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2579 		u8 buf[12];
2580 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2581 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2582 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2583 		memcpy(buf + 4, l2cap_fixed_chan, 8);
2584 		l2cap_send_cmd(conn, cmd->ident,
2585 					L2CAP_INFO_RSP, sizeof(buf), buf);
2586 	} else {
2587 		struct l2cap_info_rsp rsp;
2588 		rsp.type   = cpu_to_le16(type);
2589 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2590 		l2cap_send_cmd(conn, cmd->ident,
2591 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2592 	}
2593 
2594 	return 0;
2595 }
2596 
/* Handle an Information Response during the connection-setup information
 * exchange.  The feature mask (and, if supported, the fixed-channel
 * bitmap) is cached on the connection; when the exchange finishes or
 * fails, queued channels are kicked via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on the exchange and let pending
		 * channels proceed without the information. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer advertises fixed channels: query them
			 * before declaring the exchange finished. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2649 
2650 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2651 							u16 to_multiplier)
2652 {
2653 	u16 max_latency;
2654 
2655 	if (min > max || min < 6 || max > 3200)
2656 		return -EINVAL;
2657 
2658 	if (to_multiplier < 10 || to_multiplier > 3200)
2659 		return -EINVAL;
2660 
2661 	if (max >= to_multiplier * 8)
2662 		return -EINVAL;
2663 
2664 	max_latency = (to_multiplier * 8 / max) - 1;
2665 	if (latency > 499 || latency > max_latency)
2666 		return -EINVAL;
2667 
2668 	return 0;
2669 }
2670 
2671 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2672 					struct l2cap_cmd_hdr *cmd, u8 *data)
2673 {
2674 	struct hci_conn *hcon = conn->hcon;
2675 	struct l2cap_conn_param_update_req *req;
2676 	struct l2cap_conn_param_update_rsp rsp;
2677 	u16 min, max, latency, to_multiplier, cmd_len;
2678 	int err;
2679 
2680 	if (!(hcon->link_mode & HCI_LM_MASTER))
2681 		return -EINVAL;
2682 
2683 	cmd_len = __le16_to_cpu(cmd->len);
2684 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2685 		return -EPROTO;
2686 
2687 	req = (struct l2cap_conn_param_update_req *) data;
2688 	min		= __le16_to_cpu(req->min);
2689 	max		= __le16_to_cpu(req->max);
2690 	latency		= __le16_to_cpu(req->latency);
2691 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
2692 
2693 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2694 						min, max, latency, to_multiplier);
2695 
2696 	memset(&rsp, 0, sizeof(rsp));
2697 
2698 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2699 	if (err)
2700 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2701 	else
2702 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2703 
2704 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2705 							sizeof(rsp), &rsp);
2706 
2707 	if (!err)
2708 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2709 
2710 	return 0;
2711 }
2712 
2713 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2714 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2715 {
2716 	int err = 0;
2717 
2718 	switch (cmd->code) {
2719 	case L2CAP_COMMAND_REJ:
2720 		l2cap_command_rej(conn, cmd, data);
2721 		break;
2722 
2723 	case L2CAP_CONN_REQ:
2724 		err = l2cap_connect_req(conn, cmd, data);
2725 		break;
2726 
2727 	case L2CAP_CONN_RSP:
2728 		err = l2cap_connect_rsp(conn, cmd, data);
2729 		break;
2730 
2731 	case L2CAP_CONF_REQ:
2732 		err = l2cap_config_req(conn, cmd, cmd_len, data);
2733 		break;
2734 
2735 	case L2CAP_CONF_RSP:
2736 		err = l2cap_config_rsp(conn, cmd, data);
2737 		break;
2738 
2739 	case L2CAP_DISCONN_REQ:
2740 		err = l2cap_disconnect_req(conn, cmd, data);
2741 		break;
2742 
2743 	case L2CAP_DISCONN_RSP:
2744 		err = l2cap_disconnect_rsp(conn, cmd, data);
2745 		break;
2746 
2747 	case L2CAP_ECHO_REQ:
2748 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2749 		break;
2750 
2751 	case L2CAP_ECHO_RSP:
2752 		break;
2753 
2754 	case L2CAP_INFO_REQ:
2755 		err = l2cap_information_req(conn, cmd, data);
2756 		break;
2757 
2758 	case L2CAP_INFO_RSP:
2759 		err = l2cap_information_rsp(conn, cmd, data);
2760 		break;
2761 
2762 	default:
2763 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2764 		err = -EINVAL;
2765 		break;
2766 	}
2767 
2768 	return err;
2769 }
2770 
2771 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2772 					struct l2cap_cmd_hdr *cmd, u8 *data)
2773 {
2774 	switch (cmd->code) {
2775 	case L2CAP_COMMAND_REJ:
2776 		return 0;
2777 
2778 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2779 		return l2cap_conn_param_update_req(conn, cmd, data);
2780 
2781 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2782 		return 0;
2783 
2784 	default:
2785 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2786 		return -EINVAL;
2787 	}
2788 }
2789 
/* Process a signalling-channel skb: walk the concatenated command PDUs
 * it may contain, dispatch each to the LE or BR/EDR handler, and answer
 * handler failures with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy out the header: data may be unaligned in the skb. */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or illegal ident 0: stop parsing the
		 * rest of this skb entirely. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command. */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
2836 
/* Verify the CRC16 FCS trailing an ERTM/streaming frame.
 * Returns 0 when the FCS matches or FCS is not in use, -EBADMSG on a
 * mismatch.  Side effect: the two FCS octets are trimmed off the skb.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* The CRC also covers the L2CAP basic header plus the 2-byte
	 * control field that sit just before skb->data at this point. */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim first: the FCS bytes still physically follow the
		 * data, so reading at skb->data + skb->len after the trim
		 * yields exactly the received FCS. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2852 
/* Respond to the peer's poll: send an RNR if we are locally busy,
 * retransmit if the peer had been busy, try to push pending I-frames,
 * and send a plain RR if nothing at all went out — some frame has to
 * carry our answer back.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	/* l2cap_ertm_send() accounts what it transmits in frames_sent. */
	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Neither an RNR nor any I-frame was sent: fall back to an RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2878 
/* Insert an out-of-sequence I-frame into the SREJ reorder queue, keeping
 * the queue sorted by each frame's distance from buffer_seq in the
 * modulo-64 sequence space.  Returns -EINVAL if tx_seq is already
 * queued (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequence and SAR info in the control block for later
	 * reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the receive-window base. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame lying further out: insert before it
		 * to keep the queue ordered. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2920 
/* Reassemble ERTM I-frames into SDUs according to the SAR bits of the
 * control field and deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (delivered, buffered or
 * dropped).  A negative return (-ENOMEM or a socket-queue error) means
 * delivery must be retried later and the caller keeps ownership of
 * @skb — this is what drives the local-busy handling.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A segmented reassembly must not be in progress. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload octets carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry after a failed clone/queue attempt the payload
		 * was already accounted and copied — don't do it twice. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			/* Remember that the SDU is complete so the next
			 * attempt only retries the delivery. */
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* NOTE(review): "drop" deliberately falls through to "disconnect",
	 * so every dropped SDU also tears the channel down — confirm this
	 * matches the intended ERTM error handling. */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3027 
/* Try to drain the local-busy queue into the socket.  Returns 0 when the
 * queue is empty again — local busy is then cleared, and if an RNR had
 * been sent an RR with the poll bit goes out to restart the peer.
 * Returns -EBUSY while the socket still cannot take more data.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for next time. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy (RNR); poll it with an RR so it
	 * resumes transmitting, and wait for the F-bit answer. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3066 
/* Workqueue handler for the ERTM local-busy condition: periodically try
 * to push the frames parked on busy_q into the socket, sleeping between
 * attempts, until the queue drains, a signal/socket error interrupts, or
 * the retry budget runs out (then the channel is disconnected).
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Bounded retries: give up and drop the link. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep with the socket released so the reader can make
		 * room in the receive queue meanwhile. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means busy_q fully drained and local busy cleared. */
		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3113 
/* Deliver one in-sequence I-frame.  While the channel is already in the
 * local-busy state the frame joins busy_q and a drain is attempted.  If
 * a first-time delivery fails, the local-busy state is entered: the
 * frame is queued, an RNR is sent to throttle the peer, and the busy
 * worker takes over.  Negative return means the frame is (still) parked
 * on busy_q rather than delivered.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Ask the peer to stop transmitting (RNR). */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point in acknowledging while we cannot receive. */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3151 
3152 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3153 {
3154 	struct sk_buff *_skb;
3155 	int err = -EINVAL;
3156 
3157 	/*
3158 	 * TODO: We have to notify the userland if some data is lost with the
3159 	 * Streaming Mode.
3160 	 */
3161 
3162 	switch (control & L2CAP_CTRL_SAR) {
3163 	case L2CAP_SDU_UNSEGMENTED:
3164 		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3165 			kfree_skb(chan->sdu);
3166 			break;
3167 		}
3168 
3169 		err = sock_queue_rcv_skb(chan->sk, skb);
3170 		if (!err)
3171 			return 0;
3172 
3173 		break;
3174 
3175 	case L2CAP_SDU_START:
3176 		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3177 			kfree_skb(chan->sdu);
3178 			break;
3179 		}
3180 
3181 		chan->sdu_len = get_unaligned_le16(skb->data);
3182 		skb_pull(skb, 2);
3183 
3184 		if (chan->sdu_len > chan->imtu) {
3185 			err = -EMSGSIZE;
3186 			break;
3187 		}
3188 
3189 		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3190 		if (!chan->sdu) {
3191 			err = -ENOMEM;
3192 			break;
3193 		}
3194 
3195 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3196 
3197 		chan->conn_state |= L2CAP_CONN_SAR_SDU;
3198 		chan->partial_sdu_len = skb->len;
3199 		err = 0;
3200 		break;
3201 
3202 	case L2CAP_SDU_CONTINUE:
3203 		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3204 			break;
3205 
3206 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3207 
3208 		chan->partial_sdu_len += skb->len;
3209 		if (chan->partial_sdu_len > chan->sdu_len)
3210 			kfree_skb(chan->sdu);
3211 		else
3212 			err = 0;
3213 
3214 		break;
3215 
3216 	case L2CAP_SDU_END:
3217 		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3218 			break;
3219 
3220 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3221 
3222 		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3223 		chan->partial_sdu_len += skb->len;
3224 
3225 		if (chan->partial_sdu_len > chan->imtu)
3226 			goto drop;
3227 
3228 		if (chan->partial_sdu_len == chan->sdu_len) {
3229 			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
3230 			err = sock_queue_rcv_skb(chan->sk, _skb);
3231 			if (err < 0)
3232 				kfree_skb(_skb);
3233 		}
3234 		err = 0;
3235 
3236 drop:
3237 		kfree_skb(chan->sdu);
3238 		break;
3239 	}
3240 
3241 	kfree_skb(skb);
3242 	return err;
3243 }
3244 
3245 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3246 {
3247 	struct sk_buff *skb;
3248 	u16 control;
3249 
3250 	while ((skb = skb_peek(&chan->srej_q))) {
3251 		if (bt_cb(skb)->tx_seq != tx_seq)
3252 			break;
3253 
3254 		skb = skb_dequeue(&chan->srej_q);
3255 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3256 		l2cap_ertm_reassembly_sdu(chan, skb, control);
3257 		chan->buffer_seq_srej =
3258 			(chan->buffer_seq_srej + 1) % 64;
3259 		tx_seq = (tx_seq + 1) % 64;
3260 	}
3261 }
3262 
3263 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3264 {
3265 	struct srej_list *l, *tmp;
3266 	u16 control;
3267 
3268 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3269 		if (l->tx_seq == tx_seq) {
3270 			list_del(&l->list);
3271 			kfree(l);
3272 			return;
3273 		}
3274 		control = L2CAP_SUPER_SELECT_REJECT;
3275 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3276 		l2cap_send_sframe(chan, control);
3277 		list_del(&l->list);
3278 		list_add_tail(&l->list, &chan->srej_l);
3279 	}
3280 }
3281 
3282 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3283 {
3284 	struct srej_list *new;
3285 	u16 control;
3286 
3287 	while (tx_seq != chan->expected_tx_seq) {
3288 		control = L2CAP_SUPER_SELECT_REJECT;
3289 		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3290 		l2cap_send_sframe(chan, control);
3291 
3292 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3293 		new->tx_seq = chan->expected_tx_seq;
3294 		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3295 		list_add_tail(&new->list, &chan->srej_l);
3296 	}
3297 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3298 }
3299 
3300 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3301 {
3302 	u8 tx_seq = __get_txseq(rx_control);
3303 	u8 req_seq = __get_reqseq(rx_control);
3304 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3305 	int tx_seq_offset, expected_tx_seq_offset;
3306 	int num_to_ack = (chan->tx_win/6) + 1;
3307 	int err = 0;
3308 
3309 	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3310 							tx_seq, rx_control);
3311 
3312 	if (L2CAP_CTRL_FINAL & rx_control &&
3313 			chan->conn_state & L2CAP_CONN_WAIT_F) {
3314 		del_timer(&chan->monitor_timer);
3315 		if (chan->unacked_frames > 0)
3316 			__mod_retrans_timer();
3317 		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3318 	}
3319 
3320 	chan->expected_ack_seq = req_seq;
3321 	l2cap_drop_acked_frames(chan);
3322 
3323 	if (tx_seq == chan->expected_tx_seq)
3324 		goto expected;
3325 
3326 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3327 	if (tx_seq_offset < 0)
3328 		tx_seq_offset += 64;
3329 
3330 	/* invalid tx_seq */
3331 	if (tx_seq_offset >= chan->tx_win) {
3332 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3333 		goto drop;
3334 	}
3335 
3336 	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3337 		goto drop;
3338 
3339 	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3340 		struct srej_list *first;
3341 
3342 		first = list_first_entry(&chan->srej_l,
3343 				struct srej_list, list);
3344 		if (tx_seq == first->tx_seq) {
3345 			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3346 			l2cap_check_srej_gap(chan, tx_seq);
3347 
3348 			list_del(&first->list);
3349 			kfree(first);
3350 
3351 			if (list_empty(&chan->srej_l)) {
3352 				chan->buffer_seq = chan->buffer_seq_srej;
3353 				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3354 				l2cap_send_ack(chan);
3355 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
3356 			}
3357 		} else {
3358 			struct srej_list *l;
3359 
3360 			/* duplicated tx_seq */
3361 			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3362 				goto drop;
3363 
3364 			list_for_each_entry(l, &chan->srej_l, list) {
3365 				if (l->tx_seq == tx_seq) {
3366 					l2cap_resend_srejframe(chan, tx_seq);
3367 					return 0;
3368 				}
3369 			}
3370 			l2cap_send_srejframe(chan, tx_seq);
3371 		}
3372 	} else {
3373 		expected_tx_seq_offset =
3374 			(chan->expected_tx_seq - chan->buffer_seq) % 64;
3375 		if (expected_tx_seq_offset < 0)
3376 			expected_tx_seq_offset += 64;
3377 
3378 		/* duplicated tx_seq */
3379 		if (tx_seq_offset < expected_tx_seq_offset)
3380 			goto drop;
3381 
3382 		chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3383 
3384 		BT_DBG("chan %p, Enter SREJ", chan);
3385 
3386 		INIT_LIST_HEAD(&chan->srej_l);
3387 		chan->buffer_seq_srej = chan->buffer_seq;
3388 
3389 		__skb_queue_head_init(&chan->srej_q);
3390 		__skb_queue_head_init(&chan->busy_q);
3391 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3392 
3393 		chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3394 
3395 		l2cap_send_srejframe(chan, tx_seq);
3396 
3397 		del_timer(&chan->ack_timer);
3398 	}
3399 	return 0;
3400 
3401 expected:
3402 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3403 
3404 	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3405 		bt_cb(skb)->tx_seq = tx_seq;
3406 		bt_cb(skb)->sar = sar;
3407 		__skb_queue_tail(&chan->srej_q, skb);
3408 		return 0;
3409 	}
3410 
3411 	err = l2cap_push_rx_skb(chan, skb, rx_control);
3412 	if (err < 0)
3413 		return 0;
3414 
3415 	if (rx_control & L2CAP_CTRL_FINAL) {
3416 		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3417 			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3418 		else
3419 			l2cap_retransmit_frames(chan);
3420 	}
3421 
3422 	__mod_ack_timer();
3423 
3424 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3425 	if (chan->num_acked == num_to_ack - 1)
3426 		l2cap_send_ack(chan);
3427 
3428 	return 0;
3429 
3430 drop:
3431 	kfree_skb(skb);
3432 	return 0;
3433 }
3434 
/* Handle a Receiver Ready (RR) S-frame: it acknowledges everything up to
 * req_seq; the P and F bits additionally drive the poll/response and
 * retransmission logic.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* REJ_ACT guards against retransmitting twice for the
		 * same REJ exchange. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3476 
/* Handle a Reject (REJ) S-frame: the peer requests retransmission of all
 * unacknowledged I-frames starting at req_seq.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames below req_seq are implicitly acknowledged. */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ_ACT: this F-bit REJ was already acted upon once;
		 * don't retransmit the same run twice. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* While waiting for an F-bit, remember we already served
		 * this REJ. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: the peer requests
 * retransmission of the single I-frame with sequence number ReqSeq. */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges frames below ReqSeq */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		/* Answer the poll: set F-bit, resend the requested frame,
		 * then continue with normal transmission */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		/* Remember which SREJ we served so a later F-bit copy of the
		 * same request is not retransmitted twice */
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit SREJ: skip the resend if it repeats the SREJ we
		 * already acted on for this sequence number */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3535 
/* Handle a Receive Not Ready (RNR) S-frame: the peer signals it is busy
 * and cannot accept more I-frames for now. */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	/* RNR still acknowledges frames below ReqSeq */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ outstanding: stop retransmitting while the peer is
		 * busy and answer a poll with an F-bit RR/RNR */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ outstanding: keep the recovery going */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3561 
/* Dispatch one supervisory (S-) frame to the handler for its type.
 *
 * Consumes the skb; always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	/* An F-bit while we wait for one ends the poll exchange: stop the
	 * monitor timer and fall back to the retransmission timer if frames
	 * are still unacknowledged */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	/* S-frames carry no payload to deliver; done with the buffer */
	kfree_skb(skb);
	return 0;
}
3595 
/* Validate and dispatch one inbound ERTM PDU for this socket's channel.
 *
 * Strips the 16-bit control field, verifies the FCS, sanity-checks the
 * payload length against the negotiated MPS, validates that ReqSeq lies
 * inside the current transmit window and then hands the frame to the
 * I-frame or S-frame handler.  The skb is always consumed (the S-frame
 * handler frees it; the I-frame path presumably takes ownership too —
 * confirm against l2cap_data_channel_iframe).  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* First SAR segment carries a 2-byte SDU-length field */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* FCS trailer (if negotiated) is not payload either */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* Sequence numbers are 6-bit (mod 64); the signed subtraction may
	 * go negative, hence the += 64 fixups below */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len can underflow above if the frame was truncated */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3665 
3666 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3667 {
3668 	struct l2cap_chan *chan;
3669 	struct sock *sk = NULL;
3670 	struct l2cap_pinfo *pi;
3671 	u16 control;
3672 	u8 tx_seq;
3673 	int len;
3674 
3675 	chan = l2cap_get_chan_by_scid(conn, cid);
3676 	if (!chan) {
3677 		BT_DBG("unknown cid 0x%4.4x", cid);
3678 		goto drop;
3679 	}
3680 
3681 	sk = chan->sk;
3682 	pi = l2cap_pi(sk);
3683 
3684 	BT_DBG("chan %p, len %d", chan, skb->len);
3685 
3686 	if (sk->sk_state != BT_CONNECTED)
3687 		goto drop;
3688 
3689 	switch (chan->mode) {
3690 	case L2CAP_MODE_BASIC:
3691 		/* If socket recv buffers overflows we drop data here
3692 		 * which is *bad* because L2CAP has to be reliable.
3693 		 * But we don't have any other choice. L2CAP doesn't
3694 		 * provide flow control mechanism. */
3695 
3696 		if (chan->imtu < skb->len)
3697 			goto drop;
3698 
3699 		if (!sock_queue_rcv_skb(sk, skb))
3700 			goto done;
3701 		break;
3702 
3703 	case L2CAP_MODE_ERTM:
3704 		if (!sock_owned_by_user(sk)) {
3705 			l2cap_ertm_data_rcv(sk, skb);
3706 		} else {
3707 			if (sk_add_backlog(sk, skb))
3708 				goto drop;
3709 		}
3710 
3711 		goto done;
3712 
3713 	case L2CAP_MODE_STREAMING:
3714 		control = get_unaligned_le16(skb->data);
3715 		skb_pull(skb, 2);
3716 		len = skb->len;
3717 
3718 		if (l2cap_check_fcs(chan, skb))
3719 			goto drop;
3720 
3721 		if (__is_sar_start(control))
3722 			len -= 2;
3723 
3724 		if (chan->fcs == L2CAP_FCS_CRC16)
3725 			len -= 2;
3726 
3727 		if (len > chan->mps || len < 0 || __is_sframe(control))
3728 			goto drop;
3729 
3730 		tx_seq = __get_txseq(control);
3731 
3732 		if (chan->expected_tx_seq == tx_seq)
3733 			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3734 		else
3735 			chan->expected_tx_seq = (tx_seq + 1) % 64;
3736 
3737 		l2cap_streaming_reassembly_sdu(chan, skb, control);
3738 
3739 		goto done;
3740 
3741 	default:
3742 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3743 		break;
3744 	}
3745 
3746 drop:
3747 	kfree_skb(skb);
3748 
3749 done:
3750 	if (sk)
3751 		bh_unlock_sock(sk);
3752 
3753 	return 0;
3754 }
3755 
3756 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3757 {
3758 	struct sock *sk = NULL;
3759 	struct l2cap_chan *chan;
3760 
3761 	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3762 	if (!chan)
3763 		goto drop;
3764 
3765 	sk = chan->sk;
3766 
3767 	bh_lock_sock(sk);
3768 
3769 	BT_DBG("sk %p, len %d", sk, skb->len);
3770 
3771 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3772 		goto drop;
3773 
3774 	if (l2cap_pi(sk)->chan->imtu < skb->len)
3775 		goto drop;
3776 
3777 	if (!sock_queue_rcv_skb(sk, skb))
3778 		goto done;
3779 
3780 drop:
3781 	kfree_skb(skb);
3782 
3783 done:
3784 	if (sk)
3785 		bh_unlock_sock(sk);
3786 	return 0;
3787 }
3788 
3789 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3790 {
3791 	struct sock *sk = NULL;
3792 	struct l2cap_chan *chan;
3793 
3794 	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3795 	if (!chan)
3796 		goto drop;
3797 
3798 	sk = chan->sk;
3799 
3800 	bh_lock_sock(sk);
3801 
3802 	BT_DBG("sk %p, len %d", sk, skb->len);
3803 
3804 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3805 		goto drop;
3806 
3807 	if (l2cap_pi(sk)->chan->imtu < skb->len)
3808 		goto drop;
3809 
3810 	if (!sock_queue_rcv_skb(sk, skb))
3811 		goto done;
3812 
3813 drop:
3814 	kfree_skb(skb);
3815 
3816 done:
3817 	if (sk)
3818 		bh_unlock_sock(sk);
3819 	return 0;
3820 }
3821 
3822 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3823 {
3824 	struct l2cap_hdr *lh = (void *) skb->data;
3825 	u16 cid, len;
3826 	__le16 psm;
3827 
3828 	skb_pull(skb, L2CAP_HDR_SIZE);
3829 	cid = __le16_to_cpu(lh->cid);
3830 	len = __le16_to_cpu(lh->len);
3831 
3832 	if (len != skb->len) {
3833 		kfree_skb(skb);
3834 		return;
3835 	}
3836 
3837 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3838 
3839 	switch (cid) {
3840 	case L2CAP_CID_LE_SIGNALING:
3841 	case L2CAP_CID_SIGNALING:
3842 		l2cap_sig_channel(conn, skb);
3843 		break;
3844 
3845 	case L2CAP_CID_CONN_LESS:
3846 		psm = get_unaligned_le16(skb->data);
3847 		skb_pull(skb, 2);
3848 		l2cap_conless_channel(conn, psm, skb);
3849 		break;
3850 
3851 	case L2CAP_CID_LE_DATA:
3852 		l2cap_att_channel(conn, cid, skb);
3853 		break;
3854 
3855 	default:
3856 		l2cap_data_channel(conn, cid, skb);
3857 		break;
3858 	}
3859 }
3860 
3861 /* ---- L2CAP interface with lower layer (HCI) ---- */
3862 
3863 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3864 {
3865 	int exact = 0, lm1 = 0, lm2 = 0;
3866 	struct l2cap_chan *c;
3867 
3868 	if (type != ACL_LINK)
3869 		return -EINVAL;
3870 
3871 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3872 
3873 	/* Find listening sockets and check their link_mode */
3874 	read_lock(&chan_list_lock);
3875 	list_for_each_entry(c, &chan_list, global_l) {
3876 		struct sock *sk = c->sk;
3877 
3878 		if (sk->sk_state != BT_LISTEN)
3879 			continue;
3880 
3881 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3882 			lm1 |= HCI_LM_ACCEPT;
3883 			if (c->role_switch)
3884 				lm1 |= HCI_LM_MASTER;
3885 			exact++;
3886 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3887 			lm2 |= HCI_LM_ACCEPT;
3888 			if (c->role_switch)
3889 				lm2 |= HCI_LM_MASTER;
3890 		}
3891 	}
3892 	read_unlock(&chan_list_lock);
3893 
3894 	return exact ? lm1 : lm2;
3895 }
3896 
3897 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3898 {
3899 	struct l2cap_conn *conn;
3900 
3901 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3902 
3903 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3904 		return -EINVAL;
3905 
3906 	if (!status) {
3907 		conn = l2cap_conn_add(hcon, status);
3908 		if (conn)
3909 			l2cap_conn_ready(conn);
3910 	} else
3911 		l2cap_conn_del(hcon, bt_err(status));
3912 
3913 	return 0;
3914 }
3915 
/* HCI disconnect indication: return the reason code to report for this
 * link's disconnection. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* 0x13 is the Bluetooth HCI error "Remote User Terminated
	 * Connection" — the default when there is no L2CAP state */
	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
3927 
3928 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3929 {
3930 	BT_DBG("hcon %p reason %d", hcon, reason);
3931 
3932 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3933 		return -EINVAL;
3934 
3935 	l2cap_conn_del(hcon, bt_err(reason));
3936 
3937 	return 0;
3938 }
3939 
3940 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3941 {
3942 	struct sock *sk = chan->sk;
3943 
3944 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3945 		return;
3946 
3947 	if (encrypt == 0x00) {
3948 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
3949 			l2cap_sock_clear_timer(sk);
3950 			l2cap_sock_set_timer(sk, HZ * 5);
3951 		} else if (chan->sec_level == BT_SECURITY_HIGH)
3952 			__l2cap_sock_close(sk, ECONNREFUSED);
3953 	} else {
3954 		if (chan->sec_level == BT_SECURITY_MEDIUM)
3955 			l2cap_sock_clear_timer(sk);
3956 	}
3957 }
3958 
/* HCI security (authentication/encryption) change callback.
 *
 * Walk every channel on the affected connection and advance its state:
 * established channels get their encryption grace timers updated,
 * outgoing channels waiting on security send their Connect Request, and
 * incoming channels waiting on security send the Connect Response.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect is already pending on this channel; the result
		 * will be handled when the response arrives */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channel: just react to the encryption change */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel was waiting for security */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick teardown */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel was waiting for security before
			 * answering the peer's Connect Request */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4031 
/* Receive one ACL data fragment from the HCI core and reassemble it
 * into a complete L2CAP frame.
 *
 * A start fragment (no ACL_CONT flag) carries the Basic L2CAP header,
 * from which the total frame length is learned; continuation fragments
 * are appended to conn->rx_skb until rx_len reaches zero, at which point
 * the complete frame is dispatched.  The incoming skb is always consumed
 * (complete single-fragment frames are passed on directly; everything
 * else is copied and freed).  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First traffic on this link may arrive before connect_cfm ran */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): the bh_unlock_sock() calls below have no
		 * visible bh_lock_sock() — l2cap_get_chan_by_scid()
		 * presumably returns with the socket locked; confirm */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames larger than the channel's receive
			 * MTU before buying a reassembly buffer for them */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a preceding start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4142 
4143 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4144 {
4145 	struct l2cap_chan *c;
4146 
4147 	read_lock_bh(&chan_list_lock);
4148 
4149 	list_for_each_entry(c, &chan_list, global_l) {
4150 		struct sock *sk = c->sk;
4151 
4152 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4153 					batostr(&bt_sk(sk)->src),
4154 					batostr(&bt_sk(sk)->dst),
4155 					sk->sk_state, __le16_to_cpu(c->psm),
4156 					c->scid, c->dcid, c->imtu, c->omtu,
4157 					c->sec_level, c->mode);
4158 	}
4159 
4160 	read_unlock_bh(&chan_list_lock);
4161 
4162 	return 0;
4163 }
4164 
/* debugfs open: bind the single-record seq_file show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4169 
/* File operations for the debugfs "l2cap" channel-list file */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, created in l2cap_init() */
static struct dentry *l2cap_debugfs;
4178 
/* Callbacks registered with the HCI core: connection setup/teardown,
 * security changes and inbound ACL data all enter L2CAP through here */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4189 
4190 int __init l2cap_init(void)
4191 {
4192 	int err;
4193 
4194 	err = l2cap_init_sockets();
4195 	if (err < 0)
4196 		return err;
4197 
4198 	_busy_wq = create_singlethread_workqueue("l2cap");
4199 	if (!_busy_wq) {
4200 		err = -ENOMEM;
4201 		goto error;
4202 	}
4203 
4204 	err = hci_register_proto(&l2cap_hci_proto);
4205 	if (err < 0) {
4206 		BT_ERR("L2CAP protocol registration failed");
4207 		bt_sock_unregister(BTPROTO_L2CAP);
4208 		goto error;
4209 	}
4210 
4211 	if (bt_debugfs) {
4212 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4213 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4214 		if (!l2cap_debugfs)
4215 			BT_ERR("Failed to create L2CAP debug file");
4216 	}
4217 
4218 	return 0;
4219 
4220 error:
4221 	destroy_workqueue(_busy_wq);
4222 	l2cap_cleanup_sockets();
4223 	return err;
4224 }
4225 
/* Unregister the L2CAP layer: debugfs file, busy workqueue, HCI protocol
 * hooks and finally the socket family. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain any queued busy-work before destroying the workqueue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4238 
4239 module_param(disable_ertm, bool, 0644);
4240 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4241