xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 565d76cb)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6 
7    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License version 2 as
11    published by the Free Software Foundation;
12 
13    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 
22    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24    SOFTWARE IS DISCLAIMED.
25 */
26 
27 /* Bluetooth L2CAP core. */
28 
29 #include <linux/module.h>
30 
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50 
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53 
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 
/* When non-zero, ERTM and streaming modes are not advertised or used
 * (see l2cap_mode_supported()). */
int disable_ertm;

/* Locally supported L2CAP feature mask and fixed-channel bitmap. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue for deferred local-busy handling (see l2cap_busy_work()). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations. */
static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75 
76 /* ---- L2CAP channels ---- */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
78 {
79 	struct sock *s;
80 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 		if (l2cap_pi(s)->dcid == cid)
82 			break;
83 	}
84 	return s;
85 }
86 
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
88 {
89 	struct sock *s;
90 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 		if (l2cap_pi(s)->scid == cid)
92 			break;
93 	}
94 	return s;
95 }
96 
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		/* Lock the socket while still holding the list lock so the
		 * channel cannot be unlinked between lookup and lock. */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
109 
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
111 {
112 	struct sock *s;
113 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 		if (l2cap_pi(s)->ident == ident)
115 			break;
116 	}
117 	return s;
118 }
119 
/* Find channel with the given signalling identifier.
 * Returns the socket locked (bh), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		/* lock while the list lock is still held so the channel
		 * cannot disappear in between */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
130 
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
132 {
133 	u16 cid = L2CAP_CID_DYN_START;
134 
135 	for (; cid < L2CAP_CID_DYN_END; cid++) {
136 		if (!__l2cap_get_chan_by_scid(l, cid))
137 			return cid;
138 	}
139 
140 	return 0;
141 }
142 
/* Link a channel at the head of the connection's channel list and take
 * a reference on @sk. Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
154 
/* Unlink a channel from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link(). Takes the list write lock
 * itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
171 
/* Bind a channel to @conn, assign CIDs and output MTU according to the
 * socket type, and link it into the connection's channel list.
 * Caller must hold the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* default disconnect reason 0x13 -- presumably "remote user
	 * terminated connection" per the Bluetooth spec; TODO confirm */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
211 
/* Delete channel.
 * Must be called on the locked socket. Detaches the channel from its
 * connection, marks the socket closed/zapped, notifies the listener or
 * the socket's own waiters, and tears down all ERTM state. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept: detach from the listener and wake it. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra per-channel state: three timers, the SREJ and
	 * busy queues, and the list of outstanding SREJs. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
260 
261 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 {
263 	if (sk->sk_type == SOCK_RAW) {
264 		switch (l2cap_pi(sk)->sec_level) {
265 		case BT_SECURITY_HIGH:
266 			return HCI_AT_DEDICATED_BONDING_MITM;
267 		case BT_SECURITY_MEDIUM:
268 			return HCI_AT_DEDICATED_BONDING;
269 		default:
270 			return HCI_AT_NO_BONDING;
271 		}
272 	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275 
276 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
277 			return HCI_AT_NO_BONDING_MITM;
278 		else
279 			return HCI_AT_NO_BONDING;
280 	} else {
281 		switch (l2cap_pi(sk)->sec_level) {
282 		case BT_SECURITY_HIGH:
283 			return HCI_AT_GENERAL_BONDING_MITM;
284 		case BT_SECURITY_MEDIUM:
285 			return HCI_AT_GENERAL_BONDING;
286 		default:
287 			return HCI_AT_NO_BONDING;
288 		}
289 	}
290 }
291 
292 /* Service level security */
293 static inline int l2cap_check_security(struct sock *sk)
294 {
295 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
296 	__u8 auth_type;
297 
298 	auth_type = l2cap_get_auth_type(sk);
299 
300 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 								auth_type);
302 }
303 
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-owned range. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
325 
326 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 {
328 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 	u8 flags;
330 
331 	BT_DBG("code 0x%2.2x", code);
332 
333 	if (!skb)
334 		return;
335 
336 	if (lmp_no_flush_capable(conn->hcon->hdev))
337 		flags = ACL_START_NO_FLUSH;
338 	else
339 		flags = ACL_START;
340 
341 	hci_send_acl(conn->hcon, skb, flags);
342 }
343 
/* Build and transmit one ERTM S-frame carrying @control. Consumes any
 * pending F-bit / P-bit request and appends an FCS when the channel
 * uses CRC16. No-op unless the socket is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	/* NOTE(review): assumes conn->mtu >= hlen; if the ACL MTU were
	 * ever smaller, the skb_put() calls below would exceed count. */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
395 
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
397 {
398 	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 		control |= L2CAP_SUPER_RCV_NOT_READY;
400 		pi->conn_state |= L2CAP_CONN_RNR_SENT;
401 	} else
402 		control |= L2CAP_SUPER_RCV_READY;
403 
404 	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
405 
406 	l2cap_send_sframe(pi, control);
407 }
408 
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
410 {
411 	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
412 }
413 
/* Kick off channel establishment: send a Connect Request once the
 * remote feature mask exchange has completed and security passes;
 * otherwise start the feature-mask information exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange still in flight: wait for the info response. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* give up on the info exchange after L2CAP_INFO_TIMEOUT */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
447 
448 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
449 {
450 	u32 local_feat_mask = l2cap_feat_mask;
451 	if (!disable_ertm)
452 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
453 
454 	switch (mode) {
455 	case L2CAP_MODE_ERTM:
456 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 	case L2CAP_MODE_STREAMING:
458 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
459 	default:
460 		return 0x00;
461 	}
462 }
463 
/* Abort the channel: flush pending tx, stop the ERTM timers and send a
 * Disconnect Request. Moves the socket to BT_DISCONN with @err. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
487 
488 /* ---- L2CAP connections ---- */
489 static void l2cap_conn_start(struct l2cap_conn *conn)
490 {
491 	struct l2cap_chan_list *l = &conn->chan_list;
492 	struct sock_del_list del, *tmp1, *tmp2;
493 	struct sock *sk;
494 
495 	BT_DBG("conn %p", conn);
496 
497 	INIT_LIST_HEAD(&del.list);
498 
499 	read_lock(&l->lock);
500 
501 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 		bh_lock_sock(sk);
503 
504 		if (sk->sk_type != SOCK_SEQPACKET &&
505 				sk->sk_type != SOCK_STREAM) {
506 			bh_unlock_sock(sk);
507 			continue;
508 		}
509 
510 		if (sk->sk_state == BT_CONNECT) {
511 			struct l2cap_conn_req req;
512 
513 			if (!l2cap_check_security(sk) ||
514 					!__l2cap_no_conn_pending(sk)) {
515 				bh_unlock_sock(sk);
516 				continue;
517 			}
518 
519 			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 					conn->feat_mask)
521 					&& l2cap_pi(sk)->conf_state &
522 					L2CAP_CONF_STATE2_DEVICE) {
523 				tmp1 = kzalloc(sizeof(struct sock_del_list),
524 						GFP_ATOMIC);
525 				tmp1->sk = sk;
526 				list_add_tail(&tmp1->list, &del.list);
527 				bh_unlock_sock(sk);
528 				continue;
529 			}
530 
531 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 			req.psm  = l2cap_pi(sk)->psm;
533 
534 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
536 
537 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 				L2CAP_CONN_REQ, sizeof(req), &req);
539 
540 		} else if (sk->sk_state == BT_CONNECT2) {
541 			struct l2cap_conn_rsp rsp;
542 			char buf[128];
543 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
545 
546 			if (l2cap_check_security(sk)) {
547 				if (bt_sk(sk)->defer_setup) {
548 					struct sock *parent = bt_sk(sk)->parent;
549 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 					parent->sk_data_ready(parent, 0);
552 
553 				} else {
554 					sk->sk_state = BT_CONFIG;
555 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
557 				}
558 			} else {
559 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 			}
562 
563 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
565 
566 			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 					rsp.result != L2CAP_CR_SUCCESS) {
568 				bh_unlock_sock(sk);
569 				continue;
570 			}
571 
572 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 						l2cap_build_conf_req(sk, buf), buf);
575 			l2cap_pi(sk)->num_conf_req++;
576 		}
577 
578 		bh_unlock_sock(sk);
579 	}
580 
581 	read_unlock(&l->lock);
582 
583 	list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 		bh_lock_sock(tmp1->sk);
585 		__l2cap_sock_close(tmp1->sk, ECONNRESET);
586 		bh_unlock_sock(tmp1->sk);
587 		list_del(&tmp1->list);
588 		kfree(tmp1);
589 	}
590 }
591 
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *s, *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to a wildcard (BDADDR_ANY) socket. */
	s = node ? sk : sk1;
	if (s)
		/* lock before dropping the list lock so the socket cannot
		 * go away under us */
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

	return s;
}
623 
/* Incoming LE connection: find the LE listener, create a child socket,
 * attach it to @conn and wake the listener. The parent is returned
 * locked by l2cap_get_sock_by_scid(), so every exit path -- including
 * success, which falls through -- must go via "clean" to unlock it. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	__l2cap_chan_add(conn, sk, parent);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	bh_unlock_sock(parent);
}
667 
/* The underlying link is up: mark LE and non-data channels connected
 * and start establishment for pending connection-oriented channels. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE links may need a child socket created first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* LE channels need no further signalling. */
		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/datagram sockets are up as soon as the link is. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
702 
703 /* Notify sockets that we cannot guaranty reliability anymore */
704 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705 {
706 	struct l2cap_chan_list *l = &conn->chan_list;
707 	struct sock *sk;
708 
709 	BT_DBG("conn %p", conn);
710 
711 	read_lock(&l->lock);
712 
713 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
714 		if (l2cap_pi(sk)->force_reliable)
715 			sk->sk_err = err;
716 	}
717 
718 	read_unlock(&l->lock);
719 }
720 
/* Info request timer expired: mark the feature-mask exchange as done
 * (with whatever was learned) so pending channels can proceed. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
730 
/* Attach fresh L2CAP state to @hcon. Returns hcon->l2cap_data as-is if
 * it already exists or @status is non-zero; returns NULL on allocation
 * failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links use the dedicated LE MTU when the controller set one. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE has no feature-mask info exchange, so no info timer. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* default disconnect reason 0x13 -- presumably "remote user
	 * terminated connection" per the Bluetooth spec; TODO confirm */
	conn->disc_reason = 0x13;

	return conn;
}
768 
/* Tear down all L2CAP state on @hcon: kill every channel (propagating
 * @err to the sockets), stop the info timer and free the conn. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb() handles a NULL rx_skb */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* The info timer only ever runs once a request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
795 
796 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
797 {
798 	struct l2cap_chan_list *l = &conn->chan_list;
799 	write_lock_bh(&l->lock);
800 	__l2cap_chan_add(conn, sk, parent);
801 	write_unlock_bh(&l->lock);
802 }
803 
804 /* ---- Socket interface ---- */
805 
/* Find socket with psm and source bdaddr.
 * Returns closest match. Unlike l2cap_get_sock_by_scid() the result
 * is returned UNLOCKED.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to a wildcard (BDADDR_ANY) socket. */
	return node ? sk : sk1;
}
835 
/* Establish an outgoing L2CAP connection for @sk: route to a local
 * adapter, create (or reuse) the HCI link -- LE when the channel uses
 * the LE data CID, ACL otherwise -- attach the channel and kick off
 * channel setup. Returns 0 or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* The link may already be up (shared with another channel). */
	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
901 
/* Wait (interruptibly) until every outstanding ERTM frame is acked or
 * the channel detaches. Called with the socket locked; the lock is
 * dropped around each sleep. Returns 0 or a negative errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* re-arm the poll interval once it has fully elapsed */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
932 
/* ERTM monitor timer: the peer has not answered our poll. Re-poll
 * (P=1) until retry_count reaches remote_max_tx, then disconnect. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
952 
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Poll the peer (P=1), start the monitor timer and wait for a frame
 * with the F-bit set. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
968 
/* Free frames at the head of the tx queue that the peer has acked,
 * i.e. everything before expected_ack_seq. Stops the retransmission
 * timer once nothing is left unacked. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* the first still-unacked frame marks the stop point */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
987 
988 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 {
990 	struct l2cap_pinfo *pi = l2cap_pi(sk);
991 	struct hci_conn *hcon = pi->conn->hcon;
992 	u16 flags;
993 
994 	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995 
996 	if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 		flags = ACL_START_NO_FLUSH;
998 	else
999 		flags = ACL_START;
1000 
1001 	hci_send_acl(hcon, skb, flags);
1002 }
1003 
/* Transmit every queued frame in streaming mode: stamp TxSeq into the
 * control field, append the FCS when configured, and send. No
 * retransmission state is kept; frames are dequeued permanently. */
void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything before its own 2 bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* sequence numbers are modulo 64 */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1025 
1026 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1027 {
1028 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1029 	struct sk_buff *skb, *tx_skb;
1030 	u16 control, fcs;
1031 
1032 	skb = skb_peek(TX_QUEUE(sk));
1033 	if (!skb)
1034 		return;
1035 
1036 	do {
1037 		if (bt_cb(skb)->tx_seq == tx_seq)
1038 			break;
1039 
1040 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1041 			return;
1042 
1043 	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1044 
1045 	if (pi->remote_max_tx &&
1046 			bt_cb(skb)->retries == pi->remote_max_tx) {
1047 		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1048 		return;
1049 	}
1050 
1051 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1052 	bt_cb(skb)->retries++;
1053 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 
1055 	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1056 		control |= L2CAP_CTRL_FINAL;
1057 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1058 	}
1059 
1060 	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1061 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1062 
1063 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1064 
1065 	if (pi->fcs == L2CAP_FCS_CRC16) {
1066 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1067 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1068 	}
1069 
1070 	l2cap_do_send(sk, tx_skb);
1071 }
1072 
1073 int l2cap_ertm_send(struct sock *sk)
1074 {
1075 	struct sk_buff *skb, *tx_skb;
1076 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1077 	u16 control, fcs;
1078 	int nsent = 0;
1079 
1080 	if (sk->sk_state != BT_CONNECTED)
1081 		return -ENOTCONN;
1082 
1083 	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1084 
1085 		if (pi->remote_max_tx &&
1086 				bt_cb(skb)->retries == pi->remote_max_tx) {
1087 			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1088 			break;
1089 		}
1090 
1091 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1092 
1093 		bt_cb(skb)->retries++;
1094 
1095 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1096 		control &= L2CAP_CTRL_SAR;
1097 
1098 		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1099 			control |= L2CAP_CTRL_FINAL;
1100 			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1101 		}
1102 		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1103 				| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1104 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1105 
1106 
1107 		if (pi->fcs == L2CAP_FCS_CRC16) {
1108 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1109 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1110 		}
1111 
1112 		l2cap_do_send(sk, tx_skb);
1113 
1114 		__mod_retrans_timer();
1115 
1116 		bt_cb(skb)->tx_seq = pi->next_tx_seq;
1117 		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1118 
1119 		pi->unacked_frames++;
1120 		pi->frames_sent++;
1121 
1122 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1123 			sk->sk_send_head = NULL;
1124 		else
1125 			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1126 
1127 		nsent++;
1128 	}
1129 
1130 	return nsent;
1131 }
1132 
1133 static int l2cap_retransmit_frames(struct sock *sk)
1134 {
1135 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1136 	int ret;
1137 
1138 	if (!skb_queue_empty(TX_QUEUE(sk)))
1139 		sk->sk_send_head = TX_QUEUE(sk)->next;
1140 
1141 	pi->next_tx_seq = pi->expected_ack_seq;
1142 	ret = l2cap_ertm_send(sk);
1143 	return ret;
1144 }
1145 
/* Acknowledge received I-frames. If locally busy send RNR; otherwise
 * try to piggy-back the ack on pending I-frames and only fall back to
 * an explicit RR S-frame when nothing was sent. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames sent by l2cap_ertm_send() already carry ReqSeq */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1166 
1167 static void l2cap_send_srejtail(struct sock *sk)
1168 {
1169 	struct srej_list *tail;
1170 	u16 control;
1171 
1172 	control = L2CAP_SUPER_SELECT_REJECT;
1173 	control |= L2CAP_CTRL_FINAL;
1174 
1175 	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1176 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1177 
1178 	l2cap_send_sframe(l2cap_pi(sk), control);
1179 }
1180 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder is split into
 * conn->mtu sized fragments chained on skb's frag_list.
 *
 * Returns the number of bytes consumed, or a negative errno.  On
 * failure the partially built frag chain stays attached to @skb; the
 * callers free it via kfree_skb(skb) (see the create_*_pdu callers).
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		/* each fragment is capped at the HCI connection mtu */
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1212 
/* Build a connectionless-channel PDU: basic L2CAP header, a 2-byte
 * PSM, then the user payload (overflow beyond the HCI mtu lands in the
 * skb frag_list).
 *
 * Returns the skb or an ERR_PTR() on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* first skb carries the header plus as much payload as fits */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	/* lh->len counts the PSM (hlen - L2CAP_HDR_SIZE) plus payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);		/* also frees any frag_list chain */
		return ERR_PTR(err);
	}
	return skb;
}
1241 
1242 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1243 {
1244 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1245 	struct sk_buff *skb;
1246 	int err, count, hlen = L2CAP_HDR_SIZE;
1247 	struct l2cap_hdr *lh;
1248 
1249 	BT_DBG("sk %p len %d", sk, (int)len);
1250 
1251 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1252 	skb = bt_skb_send_alloc(sk, count + hlen,
1253 			msg->msg_flags & MSG_DONTWAIT, &err);
1254 	if (!skb)
1255 		return ERR_PTR(err);
1256 
1257 	/* Create L2CAP header */
1258 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1259 	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1260 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1261 
1262 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1263 	if (unlikely(err < 0)) {
1264 		kfree_skb(skb);
1265 		return ERR_PTR(err);
1266 	}
1267 	return skb;
1268 }
1269 
/* Build one I-frame PDU for ERTM/streaming mode.
 *
 * Frame layout: L2CAP header, 16-bit control field, optional 16-bit
 * SDU length (only on the first segment of a segmented SDU, i.e. when
 * @sdulen is non-zero), payload, and an optional 16-bit FCS
 * placeholder that is filled in at transmit time.
 *
 * Returns the skb or an ERR_PTR() on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* reserve space for the FCS; computed when the frame is sent */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1314 
/* Segment an SDU into remote_mps sized I-frames (SAR): a start frame
 * carrying the total SDU length, continuation frames, and a final end
 * frame.  The complete segment list is spliced onto the transmit
 * queue only if every PDU could be built.
 *
 * NOTE(review): assumes len > pi->remote_mps on entry (the size_t
 * subtraction below would wrap otherwise) -- confirm at the caller.
 *
 * Returns the number of payload bytes queued, or a negative errno.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* start frame: remote_mps bytes of payload, sdulen = total len */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		/* continuation/end frames carry no SDU length field */
		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* drop the whole partially-built SDU */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1360 
1361 static void l2cap_chan_ready(struct sock *sk)
1362 {
1363 	struct sock *parent = bt_sk(sk)->parent;
1364 
1365 	BT_DBG("sk %p, parent %p", sk, parent);
1366 
1367 	l2cap_pi(sk)->conf_state = 0;
1368 	l2cap_sock_clear_timer(sk);
1369 
1370 	if (!parent) {
1371 		/* Outgoing channel.
1372 		 * Wake up socket sleeping on connect.
1373 		 */
1374 		sk->sk_state = BT_CONNECTED;
1375 		sk->sk_state_change(sk);
1376 	} else {
1377 		/* Incoming channel.
1378 		 * Wake up socket sleeping on accept.
1379 		 */
1380 		parent->sk_data_ready(parent, 0);
1381 	}
1382 }
1383 
/* Copy frame to all raw sockets on that connection.
 *
 * Walks the connection's channel list under its read lock, cloning the
 * skb into each SOCK_RAW socket's receive queue.  Clone or queueing
 * failures are silently skipped (best effort delivery).
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* queue rejected (e.g. rcvbuf full): drop the clone */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
1410 
1411 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header (signalling CID), command
 * header, and @dlen bytes of @data.  If the whole PDU exceeds the HCI
 * mtu, the remainder of @data is split into frag_list skbs.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* first skb takes what is left after the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* frees the head skb together with any fragments built so far */
	kfree_skb(skb);
	return NULL;
}
1474 
1475 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1476 {
1477 	struct l2cap_conf_opt *opt = *ptr;
1478 	int len;
1479 
1480 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1481 	*ptr += len;
1482 
1483 	*type = opt->type;
1484 	*olen = opt->len;
1485 
1486 	switch (opt->len) {
1487 	case 1:
1488 		*val = *((u8 *) opt->val);
1489 		break;
1490 
1491 	case 2:
1492 		*val = get_unaligned_le16(opt->val);
1493 		break;
1494 
1495 	case 4:
1496 		*val = get_unaligned_le32(opt->val);
1497 		break;
1498 
1499 	default:
1500 		*val = (unsigned long) opt->val;
1501 		break;
1502 	}
1503 
1504 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1505 	return len;
1506 }
1507 
1508 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1509 {
1510 	struct l2cap_conf_opt *opt = *ptr;
1511 
1512 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1513 
1514 	opt->type = type;
1515 	opt->len  = len;
1516 
1517 	switch (len) {
1518 	case 1:
1519 		*((u8 *) opt->val)  = val;
1520 		break;
1521 
1522 	case 2:
1523 		put_unaligned_le16(val, opt->val);
1524 		break;
1525 
1526 	case 4:
1527 		put_unaligned_le32(val, opt->val);
1528 		break;
1529 
1530 	default:
1531 		memcpy(opt->val, (void *) val, len);
1532 		break;
1533 	}
1534 
1535 	*ptr += L2CAP_CONF_OPT_SIZE + len;
1536 }
1537 
/* Ack timer expiry: flush a pending acknowledgement (piggy-backed on
 * queued I-frames or as an explicit RR/RNR).  Runs in timer context,
 * hence the bottom-half socket lock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1546 
1547 static inline void l2cap_ertm_init(struct sock *sk)
1548 {
1549 	l2cap_pi(sk)->expected_ack_seq = 0;
1550 	l2cap_pi(sk)->unacked_frames = 0;
1551 	l2cap_pi(sk)->buffer_seq = 0;
1552 	l2cap_pi(sk)->num_acked = 0;
1553 	l2cap_pi(sk)->frames_sent = 0;
1554 
1555 	setup_timer(&l2cap_pi(sk)->retrans_timer,
1556 			l2cap_retrans_timeout, (unsigned long) sk);
1557 	setup_timer(&l2cap_pi(sk)->monitor_timer,
1558 			l2cap_monitor_timeout, (unsigned long) sk);
1559 	setup_timer(&l2cap_pi(sk)->ack_timer,
1560 			l2cap_ack_timeout, (unsigned long) sk);
1561 
1562 	__skb_queue_head_init(SREJ_QUEUE(sk));
1563 	__skb_queue_head_init(BUSY_QUEUE(sk));
1564 
1565 	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
1566 
1567 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1568 }
1569 
1570 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1571 {
1572 	switch (mode) {
1573 	case L2CAP_MODE_STREAMING:
1574 	case L2CAP_MODE_ERTM:
1575 		if (l2cap_mode_supported(mode, remote_feat_mask))
1576 			return mode;
1577 		/* fall through */
1578 	default:
1579 		return L2CAP_MODE_BASIC;
1580 	}
1581 }
1582 
/* Build our Configure Request into @data.
 *
 * On the first request the channel mode may be downgraded via
 * l2cap_select_mode() when the peer lacks ERTM/streaming support.
 * Options appended: MTU (when non-default), RFC, and an FCS option
 * when we opt out of the checksum.
 *
 * Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* only (re)select the mode on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE: the mode was pinned by the user */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* no RFC option needed unless the peer knows ERTM/streaming */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* cap the PDU size so a max-size frame fits the HCI mtu */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* advertise that we do not want an FCS */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1680 
1681 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1682 {
1683 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1684 	struct l2cap_conf_rsp *rsp = data;
1685 	void *ptr = rsp->data;
1686 	void *req = pi->conf_req;
1687 	int len = pi->conf_len;
1688 	int type, hint, olen;
1689 	unsigned long val;
1690 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1691 	u16 mtu = L2CAP_DEFAULT_MTU;
1692 	u16 result = L2CAP_CONF_SUCCESS;
1693 
1694 	BT_DBG("sk %p", sk);
1695 
1696 	while (len >= L2CAP_CONF_OPT_SIZE) {
1697 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1698 
1699 		hint  = type & L2CAP_CONF_HINT;
1700 		type &= L2CAP_CONF_MASK;
1701 
1702 		switch (type) {
1703 		case L2CAP_CONF_MTU:
1704 			mtu = val;
1705 			break;
1706 
1707 		case L2CAP_CONF_FLUSH_TO:
1708 			pi->flush_to = val;
1709 			break;
1710 
1711 		case L2CAP_CONF_QOS:
1712 			break;
1713 
1714 		case L2CAP_CONF_RFC:
1715 			if (olen == sizeof(rfc))
1716 				memcpy(&rfc, (void *) val, olen);
1717 			break;
1718 
1719 		case L2CAP_CONF_FCS:
1720 			if (val == L2CAP_FCS_NONE)
1721 				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1722 
1723 			break;
1724 
1725 		default:
1726 			if (hint)
1727 				break;
1728 
1729 			result = L2CAP_CONF_UNKNOWN;
1730 			*((u8 *) ptr++) = type;
1731 			break;
1732 		}
1733 	}
1734 
1735 	if (pi->num_conf_rsp || pi->num_conf_req > 1)
1736 		goto done;
1737 
1738 	switch (pi->mode) {
1739 	case L2CAP_MODE_STREAMING:
1740 	case L2CAP_MODE_ERTM:
1741 		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1742 			pi->mode = l2cap_select_mode(rfc.mode,
1743 					pi->conn->feat_mask);
1744 			break;
1745 		}
1746 
1747 		if (pi->mode != rfc.mode)
1748 			return -ECONNREFUSED;
1749 
1750 		break;
1751 	}
1752 
1753 done:
1754 	if (pi->mode != rfc.mode) {
1755 		result = L2CAP_CONF_UNACCEPT;
1756 		rfc.mode = pi->mode;
1757 
1758 		if (pi->num_conf_rsp == 1)
1759 			return -ECONNREFUSED;
1760 
1761 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1762 					sizeof(rfc), (unsigned long) &rfc);
1763 	}
1764 
1765 
1766 	if (result == L2CAP_CONF_SUCCESS) {
1767 		/* Configure output options and let the other side know
1768 		 * which ones we don't like. */
1769 
1770 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
1771 			result = L2CAP_CONF_UNACCEPT;
1772 		else {
1773 			pi->omtu = mtu;
1774 			pi->conf_state |= L2CAP_CONF_MTU_DONE;
1775 		}
1776 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1777 
1778 		switch (rfc.mode) {
1779 		case L2CAP_MODE_BASIC:
1780 			pi->fcs = L2CAP_FCS_NONE;
1781 			pi->conf_state |= L2CAP_CONF_MODE_DONE;
1782 			break;
1783 
1784 		case L2CAP_MODE_ERTM:
1785 			pi->remote_tx_win = rfc.txwin_size;
1786 			pi->remote_max_tx = rfc.max_transmit;
1787 
1788 			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1789 				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1790 
1791 			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1792 
1793 			rfc.retrans_timeout =
1794 				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1795 			rfc.monitor_timeout =
1796 				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1797 
1798 			pi->conf_state |= L2CAP_CONF_MODE_DONE;
1799 
1800 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1801 					sizeof(rfc), (unsigned long) &rfc);
1802 
1803 			break;
1804 
1805 		case L2CAP_MODE_STREAMING:
1806 			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1807 				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1808 
1809 			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1810 
1811 			pi->conf_state |= L2CAP_CONF_MODE_DONE;
1812 
1813 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1814 					sizeof(rfc), (unsigned long) &rfc);
1815 
1816 			break;
1817 
1818 		default:
1819 			result = L2CAP_CONF_UNACCEPT;
1820 
1821 			memset(&rfc, 0, sizeof(rfc));
1822 			rfc.mode = pi->mode;
1823 		}
1824 
1825 		if (result == L2CAP_CONF_SUCCESS)
1826 			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1827 	}
1828 	rsp->scid   = cpu_to_le16(pi->dcid);
1829 	rsp->result = cpu_to_le16(result);
1830 	rsp->flags  = cpu_to_le16(0x0000);
1831 
1832 	return ptr - data;
1833 }
1834 
1835 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1836 {
1837 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1838 	struct l2cap_conf_req *req = data;
1839 	void *ptr = req->data;
1840 	int type, olen;
1841 	unsigned long val;
1842 	struct l2cap_conf_rfc rfc;
1843 
1844 	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1845 
1846 	while (len >= L2CAP_CONF_OPT_SIZE) {
1847 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1848 
1849 		switch (type) {
1850 		case L2CAP_CONF_MTU:
1851 			if (val < L2CAP_DEFAULT_MIN_MTU) {
1852 				*result = L2CAP_CONF_UNACCEPT;
1853 				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1854 			} else
1855 				pi->imtu = val;
1856 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1857 			break;
1858 
1859 		case L2CAP_CONF_FLUSH_TO:
1860 			pi->flush_to = val;
1861 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1862 							2, pi->flush_to);
1863 			break;
1864 
1865 		case L2CAP_CONF_RFC:
1866 			if (olen == sizeof(rfc))
1867 				memcpy(&rfc, (void *)val, olen);
1868 
1869 			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1870 							rfc.mode != pi->mode)
1871 				return -ECONNREFUSED;
1872 
1873 			pi->fcs = 0;
1874 
1875 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1876 					sizeof(rfc), (unsigned long) &rfc);
1877 			break;
1878 		}
1879 	}
1880 
1881 	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1882 		return -ECONNREFUSED;
1883 
1884 	pi->mode = rfc.mode;
1885 
1886 	if (*result == L2CAP_CONF_SUCCESS) {
1887 		switch (rfc.mode) {
1888 		case L2CAP_MODE_ERTM:
1889 			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1890 			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1891 			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
1892 			break;
1893 		case L2CAP_MODE_STREAMING:
1894 			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
1895 		}
1896 	}
1897 
1898 	req->dcid   = cpu_to_le16(pi->dcid);
1899 	req->flags  = cpu_to_le16(0x0000);
1900 
1901 	return ptr - data;
1902 }
1903 
1904 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1905 {
1906 	struct l2cap_conf_rsp *rsp = data;
1907 	void *ptr = rsp->data;
1908 
1909 	BT_DBG("sk %p", sk);
1910 
1911 	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
1912 	rsp->result = cpu_to_le16(result);
1913 	rsp->flags  = cpu_to_le16(flags);
1914 
1915 	return ptr - data;
1916 }
1917 
1918 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1919 {
1920 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1921 	int type, olen;
1922 	unsigned long val;
1923 	struct l2cap_conf_rfc rfc;
1924 
1925 	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1926 
1927 	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1928 		return;
1929 
1930 	while (len >= L2CAP_CONF_OPT_SIZE) {
1931 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1932 
1933 		switch (type) {
1934 		case L2CAP_CONF_RFC:
1935 			if (olen == sizeof(rfc))
1936 				memcpy(&rfc, (void *)val, olen);
1937 			goto done;
1938 		}
1939 	}
1940 
1941 done:
1942 	switch (rfc.mode) {
1943 	case L2CAP_MODE_ERTM:
1944 		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1945 		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1946 		pi->mps    = le16_to_cpu(rfc.max_pdu_size);
1947 		break;
1948 	case L2CAP_MODE_STREAMING:
1949 		pi->mps    = le16_to_cpu(rfc.max_pdu_size);
1950 	}
1951 }
1952 
1953 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1954 {
1955 	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1956 
1957 	if (rej->reason != 0x0000)
1958 		return 0;
1959 
1960 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1961 					cmd->ident == conn->info_ident) {
1962 		del_timer(&conn->info_timer);
1963 
1964 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1965 		conn->info_ident = 0;
1966 
1967 		l2cap_conn_start(conn);
1968 	}
1969 
1970 	return 0;
1971 }
1972 
/* Handle an incoming Connection Request.
 *
 * Finds a listening socket for the PSM, performs security and backlog
 * checks, allocates and links the child channel, then sends a
 * Connection Response (success/pending/error).  May also trigger the
 * initial feature-mask information exchange and, on immediate
 * success, the first Configure Request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* security block */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		/* discard the just-allocated socket */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* inherit settings from the listening socket and link the
	 * child into the connection's channel list */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* userspace must accept first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* wait for authentication to finish */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* feature mask exchange still outstanding */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* kick off the feature mask query if we have not done so yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* immediate success: start configuration right away */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2100 
/* Handle an incoming Connection Response.
 *
 * Looks the channel up by scid (or by request ident while no scid was
 * echoed), then: on success stores the peer's dcid, enters BT_CONFIG
 * and fires the first Configure Request; on pending just flags the
 * channel; on any failure tears the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): the lookup helpers appear to return with the
	 * socket bh-locked (all paths end in bh_unlock_sock) -- confirm
	 * in their definitions */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* send the initial Configure Request only once */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* defer the teardown briefly */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2162 
2163 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2164 {
2165 	/* FCS is enabled only in ERTM or streaming mode, if one or both
2166 	 * sides request it.
2167 	 */
2168 	if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2169 		pi->fcs = L2CAP_FCS_NONE;
2170 	else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2171 		pi->fcs = L2CAP_FCS_CRC16;
2172 }
2173 
/* Handle an incoming Configure Request.
 *
 * Option data may arrive split over several requests (continuation
 * flag 0x0001); it is accumulated in pi->conf_req until complete, then
 * parsed and answered.  When both directions are configured the
 * channel is promoted to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return with the socket
	 * bh-locked (all exits go through bh_unlock_sock) -- confirm */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);	/* invalid CID */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* irreconcilable options: drop the channel */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* both directions done: the channel is usable */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* our own Configure Request has not gone out yet: send it now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2264 
2265 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2266 {
2267 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2268 	u16 scid, flags, result;
2269 	struct sock *sk;
2270 	int len = cmd->len - sizeof(*rsp);
2271 
2272 	scid   = __le16_to_cpu(rsp->scid);
2273 	flags  = __le16_to_cpu(rsp->flags);
2274 	result = __le16_to_cpu(rsp->result);
2275 
2276 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2277 			scid, flags, result);
2278 
2279 	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2280 	if (!sk)
2281 		return 0;
2282 
2283 	switch (result) {
2284 	case L2CAP_CONF_SUCCESS:
2285 		l2cap_conf_rfc_get(sk, rsp->data, len);
2286 		break;
2287 
2288 	case L2CAP_CONF_UNACCEPT:
2289 		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2290 			char req[64];
2291 
2292 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2293 				l2cap_send_disconn_req(conn, sk, ECONNRESET);
2294 				goto done;
2295 			}
2296 
2297 			/* throw out any old stored conf requests */
2298 			result = L2CAP_CONF_SUCCESS;
2299 			len = l2cap_parse_conf_rsp(sk, rsp->data,
2300 							len, req, &result);
2301 			if (len < 0) {
2302 				l2cap_send_disconn_req(conn, sk, ECONNRESET);
2303 				goto done;
2304 			}
2305 
2306 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2307 						L2CAP_CONF_REQ, len, req);
2308 			l2cap_pi(sk)->num_conf_req++;
2309 			if (result != L2CAP_CONF_SUCCESS)
2310 				goto done;
2311 			break;
2312 		}
2313 
2314 	default:
2315 		sk->sk_err = ECONNRESET;
2316 		l2cap_sock_set_timer(sk, HZ * 5);
2317 		l2cap_send_disconn_req(conn, sk, ECONNRESET);
2318 		goto done;
2319 	}
2320 
2321 	if (flags & 0x01)
2322 		goto done;
2323 
2324 	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2325 
2326 	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2327 		set_default_fcs(l2cap_pi(sk));
2328 
2329 		sk->sk_state = BT_CONNECTED;
2330 		l2cap_pi(sk)->next_tx_seq = 0;
2331 		l2cap_pi(sk)->expected_tx_seq = 0;
2332 		__skb_queue_head_init(TX_QUEUE(sk));
2333 		if (l2cap_pi(sk)->mode ==  L2CAP_MODE_ERTM)
2334 			l2cap_ertm_init(sk);
2335 
2336 		l2cap_chan_ready(sk);
2337 	}
2338 
2339 done:
2340 	bh_unlock_sock(sk);
2341 	return 0;
2342 }
2343 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Looks up the channel by our destination CID, acknowledges with a
 * Disconnection Response, and tears the channel down.  Always returns
 * 0; requests for unknown CIDs are silently ignored.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; the socket comes back bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo our CID pair back in the response */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* Retry teardown shortly from timer context instead */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2381 
2382 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2383 {
2384 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2385 	u16 dcid, scid;
2386 	struct sock *sk;
2387 
2388 	scid = __le16_to_cpu(rsp->scid);
2389 	dcid = __le16_to_cpu(rsp->dcid);
2390 
2391 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2392 
2393 	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2394 	if (!sk)
2395 		return 0;
2396 
2397 	/* don't delete l2cap channel if sk is owned by user */
2398 	if (sock_owned_by_user(sk)) {
2399 		sk->sk_state = BT_DISCONN;
2400 		l2cap_sock_clear_timer(sk);
2401 		l2cap_sock_set_timer(sk, HZ / 5);
2402 		bh_unlock_sock(sk);
2403 		return 0;
2404 	}
2405 
2406 	l2cap_chan_del(sk, 0);
2407 	bh_unlock_sock(sk);
2408 
2409 	l2cap_sock_kill(sk);
2410 	return 0;
2411 }
2412 
2413 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2414 {
2415 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2416 	u16 type;
2417 
2418 	type = __le16_to_cpu(req->type);
2419 
2420 	BT_DBG("type 0x%4.4x", type);
2421 
2422 	if (type == L2CAP_IT_FEAT_MASK) {
2423 		u8 buf[8];
2424 		u32 feat_mask = l2cap_feat_mask;
2425 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2426 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2427 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2428 		if (!disable_ertm)
2429 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2430 							 | L2CAP_FEAT_FCS;
2431 		put_unaligned_le32(feat_mask, rsp->data);
2432 		l2cap_send_cmd(conn, cmd->ident,
2433 					L2CAP_INFO_RSP, sizeof(buf), buf);
2434 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2435 		u8 buf[12];
2436 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2437 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2438 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2439 		memcpy(buf + 4, l2cap_fixed_chan, 8);
2440 		l2cap_send_cmd(conn, cmd->ident,
2441 					L2CAP_INFO_RSP, sizeof(buf), buf);
2442 	} else {
2443 		struct l2cap_info_rsp rsp;
2444 		rsp.type   = cpu_to_le16(type);
2445 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2446 		l2cap_send_cmd(conn, cmd->ident,
2447 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2448 	}
2449 
2450 	return 0;
2451 }
2452 
2453 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2454 {
2455 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2456 	u16 type, result;
2457 
2458 	type   = __le16_to_cpu(rsp->type);
2459 	result = __le16_to_cpu(rsp->result);
2460 
2461 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2462 
2463 	del_timer(&conn->info_timer);
2464 
2465 	if (result != L2CAP_IR_SUCCESS) {
2466 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2467 		conn->info_ident = 0;
2468 
2469 		l2cap_conn_start(conn);
2470 
2471 		return 0;
2472 	}
2473 
2474 	if (type == L2CAP_IT_FEAT_MASK) {
2475 		conn->feat_mask = get_unaligned_le32(rsp->data);
2476 
2477 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2478 			struct l2cap_info_req req;
2479 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2480 
2481 			conn->info_ident = l2cap_get_ident(conn);
2482 
2483 			l2cap_send_cmd(conn, conn->info_ident,
2484 					L2CAP_INFO_REQ, sizeof(req), &req);
2485 		} else {
2486 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2487 			conn->info_ident = 0;
2488 
2489 			l2cap_conn_start(conn);
2490 		}
2491 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2492 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2493 		conn->info_ident = 0;
2494 
2495 		l2cap_conn_start(conn);
2496 	}
2497 
2498 	return 0;
2499 }
2500 
2501 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2502 							u16 to_multiplier)
2503 {
2504 	u16 max_latency;
2505 
2506 	if (min > max || min < 6 || max > 3200)
2507 		return -EINVAL;
2508 
2509 	if (to_multiplier < 10 || to_multiplier > 3200)
2510 		return -EINVAL;
2511 
2512 	if (max >= to_multiplier * 8)
2513 		return -EINVAL;
2514 
2515 	max_latency = (to_multiplier * 8 / max) - 1;
2516 	if (latency > 499 || latency > max_latency)
2517 		return -EINVAL;
2518 
2519 	return 0;
2520 }
2521 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only honoured when we are master of the link.  Validates the
 * requested parameters, sends an accept/reject response, and on
 * acceptance asks the controller to apply the new parameters.
 * Returns 0, or a negative error for malformed/disallowed requests.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may decide on a parameter update */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Response first, then apply the new parameters at HCI level */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2563 
2564 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2565 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2566 {
2567 	int err = 0;
2568 
2569 	switch (cmd->code) {
2570 	case L2CAP_COMMAND_REJ:
2571 		l2cap_command_rej(conn, cmd, data);
2572 		break;
2573 
2574 	case L2CAP_CONN_REQ:
2575 		err = l2cap_connect_req(conn, cmd, data);
2576 		break;
2577 
2578 	case L2CAP_CONN_RSP:
2579 		err = l2cap_connect_rsp(conn, cmd, data);
2580 		break;
2581 
2582 	case L2CAP_CONF_REQ:
2583 		err = l2cap_config_req(conn, cmd, cmd_len, data);
2584 		break;
2585 
2586 	case L2CAP_CONF_RSP:
2587 		err = l2cap_config_rsp(conn, cmd, data);
2588 		break;
2589 
2590 	case L2CAP_DISCONN_REQ:
2591 		err = l2cap_disconnect_req(conn, cmd, data);
2592 		break;
2593 
2594 	case L2CAP_DISCONN_RSP:
2595 		err = l2cap_disconnect_rsp(conn, cmd, data);
2596 		break;
2597 
2598 	case L2CAP_ECHO_REQ:
2599 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2600 		break;
2601 
2602 	case L2CAP_ECHO_RSP:
2603 		break;
2604 
2605 	case L2CAP_INFO_REQ:
2606 		err = l2cap_information_req(conn, cmd, data);
2607 		break;
2608 
2609 	case L2CAP_INFO_RSP:
2610 		err = l2cap_information_rsp(conn, cmd, data);
2611 		break;
2612 
2613 	default:
2614 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2615 		err = -EINVAL;
2616 		break;
2617 	}
2618 
2619 	return err;
2620 }
2621 
2622 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2623 					struct l2cap_cmd_hdr *cmd, u8 *data)
2624 {
2625 	switch (cmd->code) {
2626 	case L2CAP_COMMAND_REJ:
2627 		return 0;
2628 
2629 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2630 		return l2cap_conn_param_update_req(conn, cmd, data);
2631 
2632 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2633 		return 0;
2634 
2635 	default:
2636 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2637 		return -EINVAL;
2638 	}
2639 }
2640 
/* Parse and dispatch every signalling command carried in one C-frame.
 *
 * A single signalling packet may hold several commands back to back.
 * Each is routed to the BR/EDR or LE handler depending on link type;
 * handler failures are answered with a Command Reject.  The skb is
 * consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy the header out: it may be unaligned in the skb */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or illegal ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
2686 
/* Verify the CRC-16 FCS trailer of a received frame.
 *
 * Only applies when the channel negotiated L2CAP_FCS_CRC16; otherwise
 * the frame is accepted as-is.  Returns 0 on success or -EBADMSG if
 * the checksum does not match.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trim the 2-byte FCS off the payload length.  The bytes
		 * are still present in the buffer after skb_trim, so they
		 * can be read at data + (new) len just below. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header and control field that
		 * precede skb->data as well as the remaining payload. */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2702 
/* Answer a poll from the remote side: send pending I-frames, or an
 * RNR if we are locally busy, or a plain RR when there is nothing
 * else to carry the acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	/* Locally busy: tell the peer we cannot receive right now */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing was (re)transmitted and we are not busy: ack with RR */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2729 
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's tx_seq distance from buffer_seq
 * (modulo 64).
 *
 * Returns 0 on success, or -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the re-sequencing base */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after the new one */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Larger than everything queued: append at the tail */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2772 
/* Reassemble ERTM SAR segments into a complete SDU and deliver it.
 *
 * control carries the SAR bits: unsegmented SDUs are queued directly;
 * START allocates the reassembly buffer, CONTINUE appends, and END
 * appends then delivers a clone of the whole SDU to the socket.
 * Consumes skb on every path.  Returns 0, or a negative error
 * (e.g. -ENOMEM) which the caller treats as a local-busy condition.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while a SAR SDU is in progress is a
		 * protocol violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two bytes of a START segment carry the SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* SAR_RETRY means a previous END attempt failed after the
		 * data was already appended; don't append it twice */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		/* NOTE(review): pi->sdu is left dangling after this free;
		 * SAR_SDU is clear so it should not be dereferenced, but
		 * NULLing it here would be safer -- worth confirming. */
		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* falls through: every dropped SDU also disconnects the channel */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2880 
/* Drain frames buffered during local-busy into the reassembly code.
 *
 * Returns 0 once the busy queue is empty -- clearing the local-busy
 * state and, if we had sent an RNR, polling the remote with RR+P --
 * or -EBUSY if delivery failed again and the frame was re-queued.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for a later retry */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy; poll it with RR+P and wait for
	 * the F-bit response under the monitor timer */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2920 
/* Workqueue handler for the receive-side busy condition.
 *
 * Periodically retries pushing buffered frames to the socket until it
 * succeeds, the socket errors out, a signal arrives, or the retry
 * budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted -- in which case the
 * channel is disconnected.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* struct sock is the first member of the pinfo layout */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the receive path can
		 * make progress in the meantime */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
2967 
/* Deliver a received I-frame, entering the local-busy state when the
 * socket cannot accept more data.
 *
 * While busy, frames are parked on BUSY_QUEUE and drained later by
 * l2cap_try_push_rx_skb()/l2cap_busy_work().  On entering busy an RNR
 * is sent to throttle the remote and the busy worker is scheduled.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: append and try to drain the queue */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point in sending acks while we cannot receive */
	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3006 
3007 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3008 {
3009 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3010 	struct sk_buff *_skb;
3011 	int err = -EINVAL;
3012 
3013 	/*
3014 	 * TODO: We have to notify the userland if some data is lost with the
3015 	 * Streaming Mode.
3016 	 */
3017 
3018 	switch (control & L2CAP_CTRL_SAR) {
3019 	case L2CAP_SDU_UNSEGMENTED:
3020 		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3021 			kfree_skb(pi->sdu);
3022 			break;
3023 		}
3024 
3025 		err = sock_queue_rcv_skb(sk, skb);
3026 		if (!err)
3027 			return 0;
3028 
3029 		break;
3030 
3031 	case L2CAP_SDU_START:
3032 		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3033 			kfree_skb(pi->sdu);
3034 			break;
3035 		}
3036 
3037 		pi->sdu_len = get_unaligned_le16(skb->data);
3038 		skb_pull(skb, 2);
3039 
3040 		if (pi->sdu_len > pi->imtu) {
3041 			err = -EMSGSIZE;
3042 			break;
3043 		}
3044 
3045 		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3046 		if (!pi->sdu) {
3047 			err = -ENOMEM;
3048 			break;
3049 		}
3050 
3051 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3052 
3053 		pi->conn_state |= L2CAP_CONN_SAR_SDU;
3054 		pi->partial_sdu_len = skb->len;
3055 		err = 0;
3056 		break;
3057 
3058 	case L2CAP_SDU_CONTINUE:
3059 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3060 			break;
3061 
3062 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3063 
3064 		pi->partial_sdu_len += skb->len;
3065 		if (pi->partial_sdu_len > pi->sdu_len)
3066 			kfree_skb(pi->sdu);
3067 		else
3068 			err = 0;
3069 
3070 		break;
3071 
3072 	case L2CAP_SDU_END:
3073 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3074 			break;
3075 
3076 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3077 
3078 		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3079 		pi->partial_sdu_len += skb->len;
3080 
3081 		if (pi->partial_sdu_len > pi->imtu)
3082 			goto drop;
3083 
3084 		if (pi->partial_sdu_len == pi->sdu_len) {
3085 			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
3086 			err = sock_queue_rcv_skb(sk, _skb);
3087 			if (err < 0)
3088 				kfree_skb(_skb);
3089 		}
3090 		err = 0;
3091 
3092 drop:
3093 		kfree_skb(pi->sdu);
3094 		break;
3095 	}
3096 
3097 	kfree_skb(skb);
3098 	return err;
3099 }
3100 
/* After a missing frame arrives, flush the run of now-consecutive
 * frames at the head of the SREJ queue into reassembly, advancing
 * buffer_seq_srej for each delivered frame.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Stop at the first remaining gap in the sequence */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3118 
3119 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3120 {
3121 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3122 	struct srej_list *l, *tmp;
3123 	u16 control;
3124 
3125 	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3126 		if (l->tx_seq == tx_seq) {
3127 			list_del(&l->list);
3128 			kfree(l);
3129 			return;
3130 		}
3131 		control = L2CAP_SUPER_SELECT_REJECT;
3132 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3133 		l2cap_send_sframe(pi, control);
3134 		list_del(&l->list);
3135 		list_add_tail(&l->list, SREJ_LIST(sk));
3136 	}
3137 }
3138 
3139 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3140 {
3141 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3142 	struct srej_list *new;
3143 	u16 control;
3144 
3145 	while (tx_seq != pi->expected_tx_seq) {
3146 		control = L2CAP_SUPER_SELECT_REJECT;
3147 		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3148 		l2cap_send_sframe(pi, control);
3149 
3150 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3151 		new->tx_seq = pi->expected_tx_seq;
3152 		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3153 		list_add_tail(&new->list, SREJ_LIST(sk));
3154 	}
3155 	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3156 }
3157 
3158 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3159 {
3160 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3161 	u8 tx_seq = __get_txseq(rx_control);
3162 	u8 req_seq = __get_reqseq(rx_control);
3163 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3164 	int tx_seq_offset, expected_tx_seq_offset;
3165 	int num_to_ack = (pi->tx_win/6) + 1;
3166 	int err = 0;
3167 
3168 	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3169 								rx_control);
3170 
3171 	if (L2CAP_CTRL_FINAL & rx_control &&
3172 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3173 		del_timer(&pi->monitor_timer);
3174 		if (pi->unacked_frames > 0)
3175 			__mod_retrans_timer();
3176 		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3177 	}
3178 
3179 	pi->expected_ack_seq = req_seq;
3180 	l2cap_drop_acked_frames(sk);
3181 
3182 	if (tx_seq == pi->expected_tx_seq)
3183 		goto expected;
3184 
3185 	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3186 	if (tx_seq_offset < 0)
3187 		tx_seq_offset += 64;
3188 
3189 	/* invalid tx_seq */
3190 	if (tx_seq_offset >= pi->tx_win) {
3191 		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3192 		goto drop;
3193 	}
3194 
3195 	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3196 		goto drop;
3197 
3198 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3199 		struct srej_list *first;
3200 
3201 		first = list_first_entry(SREJ_LIST(sk),
3202 				struct srej_list, list);
3203 		if (tx_seq == first->tx_seq) {
3204 			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3205 			l2cap_check_srej_gap(sk, tx_seq);
3206 
3207 			list_del(&first->list);
3208 			kfree(first);
3209 
3210 			if (list_empty(SREJ_LIST(sk))) {
3211 				pi->buffer_seq = pi->buffer_seq_srej;
3212 				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3213 				l2cap_send_ack(pi);
3214 				BT_DBG("sk %p, Exit SREJ_SENT", sk);
3215 			}
3216 		} else {
3217 			struct srej_list *l;
3218 
3219 			/* duplicated tx_seq */
3220 			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3221 				goto drop;
3222 
3223 			list_for_each_entry(l, SREJ_LIST(sk), list) {
3224 				if (l->tx_seq == tx_seq) {
3225 					l2cap_resend_srejframe(sk, tx_seq);
3226 					return 0;
3227 				}
3228 			}
3229 			l2cap_send_srejframe(sk, tx_seq);
3230 		}
3231 	} else {
3232 		expected_tx_seq_offset =
3233 			(pi->expected_tx_seq - pi->buffer_seq) % 64;
3234 		if (expected_tx_seq_offset < 0)
3235 			expected_tx_seq_offset += 64;
3236 
3237 		/* duplicated tx_seq */
3238 		if (tx_seq_offset < expected_tx_seq_offset)
3239 			goto drop;
3240 
3241 		pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3242 
3243 		BT_DBG("sk %p, Enter SREJ", sk);
3244 
3245 		INIT_LIST_HEAD(SREJ_LIST(sk));
3246 		pi->buffer_seq_srej = pi->buffer_seq;
3247 
3248 		__skb_queue_head_init(SREJ_QUEUE(sk));
3249 		__skb_queue_head_init(BUSY_QUEUE(sk));
3250 		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3251 
3252 		pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3253 
3254 		l2cap_send_srejframe(sk, tx_seq);
3255 
3256 		del_timer(&pi->ack_timer);
3257 	}
3258 	return 0;
3259 
3260 expected:
3261 	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3262 
3263 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3264 		bt_cb(skb)->tx_seq = tx_seq;
3265 		bt_cb(skb)->sar = sar;
3266 		__skb_queue_tail(SREJ_QUEUE(sk), skb);
3267 		return 0;
3268 	}
3269 
3270 	err = l2cap_push_rx_skb(sk, skb, rx_control);
3271 	if (err < 0)
3272 		return 0;
3273 
3274 	if (rx_control & L2CAP_CTRL_FINAL) {
3275 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3276 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3277 		else
3278 			l2cap_retransmit_frames(sk);
3279 	}
3280 
3281 	__mod_ack_timer();
3282 
3283 	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3284 	if (pi->num_acked == num_to_ack - 1)
3285 		l2cap_send_ack(pi);
3286 
3287 	return 0;
3288 
3289 drop:
3290 	kfree_skb(skb);
3291 	return 0;
3292 }
3293 
/* Handle a Receiver Ready (RR) S-frame.
 *
 * RR acknowledges all frames up to req_seq.  With the P-bit it is a
 * poll we must answer with an F-bit response; with the F-bit it
 * completes a poll of ours; otherwise it may simply resume a flow
 * that was stopped by a remote-busy condition.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Skip the retransmission if a REJ already triggered it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3337 
/* Handle a Reject (REJ) S-frame: the peer requests a go-back-N
 * retransmission starting at req_seq.  The REJ_ACT flag guards
 * against retransmitting a second time when the F-bit answer for the
 * same recovery is still in flight.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final REJ: retransmit only if the earlier (non-final)
		 * REJ did not already do it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame asking for retransmission
 * of the single frame req_seq.  The P and F bits distinguish poll,
 * poll-response, and plain requests; SREJ_ACT prevents retransmitting
 * the same frame twice within one poll cycle.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to req_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Already answered this SREJ during the poll exchange */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3398 
/* Handle a Receiver Not Ready (RNR) S-frame: the peer cannot accept
 * more I-frames.  Marks the remote busy, processes the piggybacked
 * acknowledgement, and answers a poll with the appropriate F-bit
 * response (RR/RNR, or the SREJ tail while in SREJ recovery).
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing to retransmit while the peer is busy */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3425 
3426 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3427 {
3428 	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3429 
3430 	if (L2CAP_CTRL_FINAL & rx_control &&
3431 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3432 		del_timer(&l2cap_pi(sk)->monitor_timer);
3433 		if (l2cap_pi(sk)->unacked_frames > 0)
3434 			__mod_retrans_timer();
3435 		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3436 	}
3437 
3438 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3439 	case L2CAP_SUPER_RCV_READY:
3440 		l2cap_data_channel_rrframe(sk, rx_control);
3441 		break;
3442 
3443 	case L2CAP_SUPER_REJECT:
3444 		l2cap_data_channel_rejframe(sk, rx_control);
3445 		break;
3446 
3447 	case L2CAP_SUPER_SELECT_REJECT:
3448 		l2cap_data_channel_srejframe(sk, rx_control);
3449 		break;
3450 
3451 	case L2CAP_SUPER_RCV_NOT_READY:
3452 		l2cap_data_channel_rnrframe(sk, rx_control);
3453 		break;
3454 	}
3455 
3456 	kfree_skb(skb);
3457 	return 0;
3458 }
3459 
/* Entry point for a PDU received on an ERTM channel.  Validates the
 * control field, FCS and payload length, then dispatches to the
 * I-frame or S-frame handlers.  The skb is consumed on every path
 * (by the handlers or via the drop label); always returns 0. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	/* The 16-bit control field precedes the payload. */
	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A SAR start I-frame carries an extra 2-byte SDU length field. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The CRC16 FCS trailer is not part of the payload either. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	/* Payload larger than the negotiated MPS: protocol violation. */
	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* Window arithmetic is modulo 64 (6-bit sequence numbers);
	 * negative remainders are folded back into the window. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame shorter than its headers. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must not carry any payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3529 
/* Deliver an incoming data frame to the channel identified by @cid,
 * according to the channel's mode.  Consumes @skb; always returns 0.
 *
 * NOTE(review): the done: path unlocks the socket without a visible
 * bh_lock_sock() here, so l2cap_get_chan_by_scid() presumably returns
 * with the socket bh-locked — confirm in that helper. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process immediately unless the socket is owned by a
		 * user context; then defer via the socket backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Streaming mode has no retransmission: corrupted
		 * frames are silently dropped. */
		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* A SAR start fragment carries a 2-byte SDU length. */
		if (__is_sar_start(control))
			len -= 2;

		/* The CRC16 FCS trailer is not payload. */
		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Oversized payloads and S-frames are invalid here. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Track the next expected sequence number; gaps are
		 * accepted (lost frames are simply skipped). */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3617 
/* Deliver a connectionless-channel packet to a socket bound to @psm.
 * Consumes @skb; always returns 0. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Respect the channel's receive MTU. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* Queued successfully: skip the drop label's kfree_skb(). */
	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3647 
3648 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3649 {
3650 	struct l2cap_hdr *lh = (void *) skb->data;
3651 	u16 cid, len;
3652 	__le16 psm;
3653 
3654 	skb_pull(skb, L2CAP_HDR_SIZE);
3655 	cid = __le16_to_cpu(lh->cid);
3656 	len = __le16_to_cpu(lh->len);
3657 
3658 	if (len != skb->len) {
3659 		kfree_skb(skb);
3660 		return;
3661 	}
3662 
3663 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3664 
3665 	switch (cid) {
3666 	case L2CAP_CID_LE_SIGNALING:
3667 	case L2CAP_CID_SIGNALING:
3668 		l2cap_sig_channel(conn, skb);
3669 		break;
3670 
3671 	case L2CAP_CID_CONN_LESS:
3672 		psm = get_unaligned_le16(skb->data);
3673 		skb_pull(skb, 2);
3674 		l2cap_conless_channel(conn, psm, skb);
3675 		break;
3676 
3677 	default:
3678 		l2cap_data_channel(conn, cid, skb);
3679 		break;
3680 	}
3681 }
3682 
3683 /* ---- L2CAP interface with lower layer (HCI) ---- */
3684 
3685 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3686 {
3687 	int exact = 0, lm1 = 0, lm2 = 0;
3688 	register struct sock *sk;
3689 	struct hlist_node *node;
3690 
3691 	if (type != ACL_LINK)
3692 		return -EINVAL;
3693 
3694 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3695 
3696 	/* Find listening sockets and check their link_mode */
3697 	read_lock(&l2cap_sk_list.lock);
3698 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3699 		if (sk->sk_state != BT_LISTEN)
3700 			continue;
3701 
3702 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3703 			lm1 |= HCI_LM_ACCEPT;
3704 			if (l2cap_pi(sk)->role_switch)
3705 				lm1 |= HCI_LM_MASTER;
3706 			exact++;
3707 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3708 			lm2 |= HCI_LM_ACCEPT;
3709 			if (l2cap_pi(sk)->role_switch)
3710 				lm2 |= HCI_LM_MASTER;
3711 		}
3712 	}
3713 	read_unlock(&l2cap_sk_list.lock);
3714 
3715 	return exact ? lm1 : lm2;
3716 }
3717 
3718 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3719 {
3720 	struct l2cap_conn *conn;
3721 
3722 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3723 
3724 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3725 		return -EINVAL;
3726 
3727 	if (!status) {
3728 		conn = l2cap_conn_add(hcon, status);
3729 		if (conn)
3730 			l2cap_conn_ready(conn);
3731 	} else
3732 		l2cap_conn_del(hcon, bt_err(status));
3733 
3734 	return 0;
3735 }
3736 
3737 static int l2cap_disconn_ind(struct hci_conn *hcon)
3738 {
3739 	struct l2cap_conn *conn = hcon->l2cap_data;
3740 
3741 	BT_DBG("hcon %p", hcon);
3742 
3743 	if (hcon->type != ACL_LINK || !conn)
3744 		return 0x13;
3745 
3746 	return conn->disc_reason;
3747 }
3748 
3749 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3750 {
3751 	BT_DBG("hcon %p reason %d", hcon, reason);
3752 
3753 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3754 		return -EINVAL;
3755 
3756 	l2cap_conn_del(hcon, bt_err(reason));
3757 
3758 	return 0;
3759 }
3760 
3761 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3762 {
3763 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3764 		return;
3765 
3766 	if (encrypt == 0x00) {
3767 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3768 			l2cap_sock_clear_timer(sk);
3769 			l2cap_sock_set_timer(sk, HZ * 5);
3770 		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3771 			__l2cap_sock_close(sk, ECONNREFUSED);
3772 	} else {
3773 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3774 			l2cap_sock_clear_timer(sk);
3775 	}
3776 }
3777 
/* HCI callback: an authentication/encryption procedure on @hcon
 * finished with @status; @encrypt is the resulting encryption state.
 * Walk every channel on the connection and advance its state machine
 * accordingly.  Always returns 0. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels with a connect request still in flight are
		 * handled when the response arrives. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need their security timers
		 * adjusted to the new encryption state. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel that waited for security:
			 * send the deferred connect request, or time
			 * the socket out quickly on failure. */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel that waited for security:
			 * answer the pending connect request. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3851 
/* HCI callback: a (possibly fragmented) ACL data packet arrived.
 * Start fragments (no ACL_CONT flag) carry the Basic L2CAP header;
 * continuation fragments are appended to conn->rx_skb until the full
 * frame length is reached, then the frame is dispatched.  Always
 * consumes @skb and returns 0. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start frame while reassembly is still pending
		 * means the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): the unlocks below suggest that
		 * l2cap_get_chan_by_scid() returns the socket
		 * bh-locked — confirm in that helper. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* Reject frames exceeding the channel's receive MTU
		 * before allocating a reassembly buffer for them. */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced frame length. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
3959 
3960 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3961 {
3962 	struct sock *sk;
3963 	struct hlist_node *node;
3964 
3965 	read_lock_bh(&l2cap_sk_list.lock);
3966 
3967 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3968 		struct l2cap_pinfo *pi = l2cap_pi(sk);
3969 
3970 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3971 					batostr(&bt_sk(sk)->src),
3972 					batostr(&bt_sk(sk)->dst),
3973 					sk->sk_state, __le16_to_cpu(pi->psm),
3974 					pi->scid, pi->dcid,
3975 					pi->imtu, pi->omtu, pi->sec_level,
3976 					pi->mode);
3977 	}
3978 
3979 	read_unlock_bh(&l2cap_sk_list.lock);
3980 
3981 	return 0;
3982 }
3983 
/* debugfs open handler: bind the single-record seq_file show op. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
3988 
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; NULL when it was not created. */
static struct dentry *l2cap_debugfs;
3997 
/* Callbacks registered with the HCI core for the L2CAP protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4008 
4009 int __init l2cap_init(void)
4010 {
4011 	int err;
4012 
4013 	err = l2cap_init_sockets();
4014 	if (err < 0)
4015 		return err;
4016 
4017 	_busy_wq = create_singlethread_workqueue("l2cap");
4018 	if (!_busy_wq) {
4019 		err = -ENOMEM;
4020 		goto error;
4021 	}
4022 
4023 	err = hci_register_proto(&l2cap_hci_proto);
4024 	if (err < 0) {
4025 		BT_ERR("L2CAP protocol registration failed");
4026 		bt_sock_unregister(BTPROTO_L2CAP);
4027 		goto error;
4028 	}
4029 
4030 	if (bt_debugfs) {
4031 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4032 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4033 		if (!l2cap_debugfs)
4034 			BT_ERR("Failed to create L2CAP debug file");
4035 	}
4036 
4037 	return 0;
4038 
4039 error:
4040 	destroy_workqueue(_busy_wq);
4041 	l2cap_cleanup_sockets();
4042 	return err;
4043 }
4044 
/* Unregister the L2CAP layer: debugfs entry, busy-state workqueue,
 * HCI protocol hooks and the socket family, in that order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain any pending busy-state work before tearing it down. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4057 
/* Module parameter: lets ERTM support be disabled at load time. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4060