1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65
/* Map an HCI link type plus HCI-level address type to the corresponding
 * socket-level BDADDR_* address type.  BR/EDR links always map to
 * BDADDR_BREDR regardless of the address type given.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
/* Socket-level address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* Socket-level address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88 /* ---- L2CAP channels ---- */
89
/* Look up a channel on @conn by its destination CID.  Returns the bare
 * channel pointer without taking a reference or lock.
 * NOTE(review): assumes the caller serializes access to conn->chan_l
 * (conn->lock) — confirm against callers.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
101
/* Look up a channel on @conn by its source CID.  Returns the bare
 * channel pointer without taking a reference or lock.
 * NOTE(review): assumes the caller serializes access to conn->chan_l
 * (conn->lock) — confirm against callers.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
113
114 /* Find channel with given SCID.
115 * Returns a reference locked channel.
116 */
/* Find channel with given SCID.
 * Returns a reference locked channel.
 *
 * On success the caller owns one reference (drop with l2cap_chan_put())
 * and holds the channel lock (release with l2cap_chan_unlock()).
 * Returns NULL if no channel matches or its refcount already hit zero.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132
133 /* Find channel with given DCID.
134 * Returns a reference locked channel.
135 */
/* Find channel with given DCID.
 * Returns a reference locked channel.
 *
 * On success the caller owns one reference (drop with l2cap_chan_put())
 * and holds the channel lock (release with l2cap_chan_unlock()).
 * Returns NULL if no channel matches or its refcount already hit zero.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151
/* Look up a channel on @conn by the signalling command identifier it is
 * currently using.  No reference or lock is taken on the result.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
163
/* Search the global channel list for a channel bound to source PSM @psm
 * and source address @src, restricted to the same transport family as
 * @src_type (BR/EDR channels only match BR/EDR lookups and vice versa).
 * NOTE(review): assumes chan_list_lock is held by the caller — confirm.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* Skip channels on the other transport family */
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
181
/* Bind @chan to PSM @psm on source address @src.  If @psm is zero, a
 * free dynamic PSM is auto-allocated instead (BR/EDR PSMs advance in
 * steps of 2, LE PSMs in steps of 1).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Remains -EINVAL unless a free PSM is found below */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
/* Bind @chan to the fixed CID @scid, converting it to a fixed channel.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 u16 cid, dyn_end;
245
246 if (conn->hcon->type == LE_LINK)
247 dyn_end = L2CAP_CID_LE_DYN_END;
248 else
249 dyn_end = L2CAP_CID_DYN_END;
250
251 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 if (!__l2cap_get_chan_by_scid(conn, cid))
253 return cid;
254 }
255
256 return 0;
257 }
258
/* Move @chan to @state and notify the channel owner via the
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
/* Move @chan to @state and notify the channel owner, reporting @err */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274
/* Report @err to the channel owner without changing the channel state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending and a retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (the two are mutually exclusive).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297
/* Linear scan of @head for the skb whose ERTM TX sequence number equals
 * @seq.  Returns the skb (still queued) or NULL if not found.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310
311 /* ---- L2CAP sequence number lists ---- */
312
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314 * SREJ requests that are received and for frames that are to be
315 * retransmitted. These seq_list functions implement a singly-linked
316 * list in an array, where membership in the list can also be checked
317 * in constant time. Items can also be added to the tail of the list
318 * and removed from the head in constant time, without further memory
319 * allocs or frees.
320 */
321
/* Allocate and reset the backing array of a sequence list sized to hold
 * at least @size entries.  Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array directly */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
/* Release the backing array allocated by l2cap_seq_list_init() */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
/* Test whether @seq is currently a member of the list.  A slot holding
 * anything but L2CAP_SEQ_LIST_CLEAR means the entry is present.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — callers are expected to
 * check head != L2CAP_SEQ_LIST_CLEAR first; confirm at call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link, then clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset the list to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
/* Empty the list, clearing every slot.  O(array size), but a no-op for
 * an already-empty list.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386
/* Append @seq to the tail of the list.  Duplicate appends are silently
 * ignored, preserving the existing position of @seq.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Link in: either become the head or chain off the old tail */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Deferred work run when a channel timer expires: close the channel
 * with an error code chosen from its current state (connection refused
 * while connecting/configured, timeout otherwise) and drop the
 * reference the timer held.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Channel may already be detached from its connection */
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
/* Allocate and initialize a new channel (GFP_ATOMIC, so callable from
 * atomic context), link it into the global channel list, and return it
 * in BT_OPEN state with one reference held.  Returns NULL on OOM.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; released by l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
/* Take an additional reference on @c.  Caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500
/* Take a reference on @c unless its refcount has already dropped to
 * zero (i.e. the channel is being destroyed).  Returns @c on success or
 * NULL if the channel could not be pinned.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
510
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518
/* Reset @chan to the default L2CAP parameters (FCS, ERTM window sizes,
 * security level, timeouts) and clear any previous configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until configuration says otherwise, mirror the local values */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Restart configuration from scratch */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539
l2cap_le_rx_credits(struct l2cap_chan * chan)540 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
541 {
542 size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
543
544 if (chan->mps == 0)
545 return 0;
546
547 /* If we don't know the available space in the receiver buffer, give
548 * enough credits for a full packet.
549 */
550 if (chan->rx_avail == -1)
551 return (chan->imtu / chan->mps) + 1;
552
553 /* If we know how much space is available in the receive buffer, give
554 * out as many credits as would fill the buffer.
555 */
556 if (chan->rx_avail <= sdu_len)
557 return 0;
558
559 return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
560 }
561
/* Initialize LE credit-based flow control state on @chan: reset SDU
 * reassembly, record the peer-granted @tx_credits, derive the MPS and
 * compute the initial RX credits to grant.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574
/* Initialize enhanced-credit (ECRED) flow control: same as LE flow
 * control, but enforce the spec-mandated minimum MPS of 64 octets.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
585
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set default QoS parameters, take a channel reference and append the
 * channel to the connection's list.  Callers serialize via conn->lock
 * (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow specification (best effort) */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by the connection's channel list;
	 * dropped in l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645
/* Detach @chan from its connection and tear it down with error @err:
 * stop the channel timer, notify the owner, unlink from the connection
 * list (dropping the list's channel reference and, where applicable,
 * the hci_conn reference), then release mode-specific resources unless
 * configuration never completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below only exists once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703
/* Invoke @func(chan, @data) for every channel on @conn whose signalling
 * identifier matches @id.  Safe against @func removing the channel from
 * the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
714
/* Invoke @func(chan, @data) for every channel on @conn.
 * NOTE(review): iteration is not removal-safe — @func must not unlink
 * the channel (use __l2cap_chan_list_id() for that).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
724
/* Locked wrapper around __l2cap_chan_list(); a NULL @conn is a no-op */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
737
/* Deferred work: propagate the (possibly updated) identity address of
 * the underlying hci_conn to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756
/* Reject a pending LE credit-based connection request on @chan: move to
 * BT_DISCONN and send an LE Connection Response carrying the reject
 * result (authorization pending vs. bad PSM).
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* DEFER_SETUP means userspace never authorized the connection */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
779
/* Reject a pending ECRED connection on @chan: move to BT_DISCONN and
 * emit the deferred enhanced-credit connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786
/* Reject a pending BR/EDR connection request on @chan: move to
 * BT_DISCONN and send a Connection Response with the reject result
 * (security block vs. bad PSM).
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	/* DEFER_SETUP means userspace never authorized the connection */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
807
/* Close @chan with error @reason, taking the path appropriate to its
 * current state: send a Disconnect Request for established
 * connection-oriented channels, reject half-open incoming connections,
 * or simply tear the channel down.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer the disconnect
			 * request before the channel timer fires.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it in the
		 * transport/mode specific way before deleting.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers the response;
					 * the channel is deleted later.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858
/* Map the channel type and requested security level to the HCI
 * authentication requirement used for pairing.  May upgrade
 * BT_SECURITY_LOW to BT_SECURITY_SDP for SDP/3DSP PSMs as a side
 * effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910
911 /* Service level security */
/* Service level security */
/* Verify (or trigger establishment of) the security level required by
 * @chan on its underlying link: SMP pairing on LE, HCI authentication
 * on BR/EDR.  Return value follows the respective helper.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
925
/* Allocate the next signalling command identifier for @conn, cycling
 * through the kernel-owned range 1-128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947
/* Send @skb over the connection's ACL channel, or free it if the
 * underlying hci_conn has gone away in the meantime.  Consumes @skb
 * either way.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
957
/* Build and transmit an L2CAP signalling command on @conn.  Silently
 * drops the command if the skb could not be built.  Signalling traffic
 * is sent at maximum HCI priority with the radio forced active.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
982
/* Transmit a data skb for @chan over its HCI ACL channel, choosing the
 * flush semantics from the link type and the channel's FLAG_FLUSHABLE
 * setting.  Consumes @skb.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005
/* Decode a 16-bit enhanced control field into the generic l2cap_ctrl
 * representation.  Fields that do not apply to the frame type are
 * zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = !!(enh & L2CAP_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-Frame: only poll/supervise are meaningful */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: only sar/txseq are meaningful */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1029
/* Decode a 32-bit extended control field into the generic l2cap_ctrl
 * representation.  Fields that do not apply to the frame type are
 * zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-Frame: only poll/supervise are meaningful */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: only sar/txseq are meaningful */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1053
/* Parse the (enhanced or extended, depending on FLAG_EXT_CTRL) control
 * field at the start of @skb into bt_cb(skb)->l2cap and strip it from
 * the skb data.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1067
__pack_extended_control(struct l2cap_ctrl * control)1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 u32 packed;
1071
1072 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074
1075 if (control->sframe) {
1076 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 } else {
1080 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 }
1083
1084 return packed;
1085 }
1086
__pack_enhanced_control(struct l2cap_ctrl * control)1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 u16 packed;
1090
1091 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093
1094 if (control->sframe) {
1095 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 packed |= L2CAP_CTRL_FRAME_TYPE;
1098 } else {
1099 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 }
1102
1103 return packed;
1104 }
1105
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 struct l2cap_ctrl *control,
1108 struct sk_buff *skb)
1109 {
1110 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 put_unaligned_le32(__pack_extended_control(control),
1112 skb->data + L2CAP_HDR_SIZE);
1113 } else {
1114 put_unaligned_le16(__pack_enhanced_control(control),
1115 skb->data + L2CAP_HDR_SIZE);
1116 }
1117 }
1118
__ertm_hdr_size(struct l2cap_chan * chan)1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 return L2CAP_EXT_HDR_SIZE;
1123 else
1124 return L2CAP_ENH_HDR_SIZE;
1125 }
1126
/* Allocate and build a complete S-frame PDU carrying the already-packed
 * @control field: basic header, control field and optional FCS.
 *
 * Returns the skb ready for transmission, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* PDU length excludes the basic L2CAP header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the channel's control mode */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything appended so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159
/* Build and transmit one S-frame described by @control.
 *
 * Side effects on channel state: may consume a pending F-bit
 * (CONN_SEND_FBIT), tracks whether an RNR has been sent, and for any
 * frame other than SREJ records the acked sequence and stops the ack
 * timer, since the frame acknowledges everything up to reqseq.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit rides on the first non-poll S-frame we send */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge frames; all other supervisory types do */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197
/* Send a Receiver Ready (or Receiver Not Ready, when locally busy)
 * S-frame acknowledging up to the current buffer_seq.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;
	control.reqseq = chan->buffer_seq;
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;

	l2cap_send_sframe(chan, &control);
}
1216
__l2cap_no_conn_pending(struct l2cap_chan * chan)1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 return true;
1221
1222 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224
/* Send an L2CAP Connection Request for @chan on its BR/EDR link and mark
 * the connect as pending; the allocated ident is stored on the channel so
 * the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1239
/* Transition @chan to BT_CONNECTED: clear configuration state, stop the
 * channel timer and notify the owner via ops->ready(). For LE/extended
 * flow-control modes with no initial TX credits, suspend sending first.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits to send with yet: block the owner's TX path */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
/* Send an LE Credit Based Connection Request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is issued at most once; initializes
 * LE flow control (MPS/credits) before building the PDU.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291
/* Scratch state used while collecting deferred channels into a single
 * Enhanced Credit Based connection request.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* source CIDs appended after the fixed header */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID; deferred chans must match */
	int count;			/* number of scid[] entries filled */
};
1301
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1302 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1303 {
1304 struct l2cap_ecred_conn_data *conn = data;
1305 struct pid *pid;
1306
1307 if (chan == conn->chan)
1308 return;
1309
1310 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 return;
1312
1313 pid = chan->ops->get_peer_pid(chan);
1314
1315 /* Only add deferred channels with the same PID/PSM */
1316 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1317 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1318 return;
1319
1320 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1321 return;
1322
1323 l2cap_ecred_init(chan, 0);
1324
1325 /* Set the same ident so we can match on the rsp */
1326 chan->ident = conn->chan->ident;
1327
1328 /* Include all channels deferred */
1329 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1330
1331 conn->count++;
1332 }
1333
/* Send an Enhanced Credit Based Connection Request for @chan, batching in
 * any sibling channels that were deferred with the same PID/PSM (via
 * l2cap_ecred_defer_connect). Guarded by FLAG_ECRED_CONN_REQ_SENT so the
 * request goes out at most once per channel.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by the initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect deferred channels with the same PID/PSM into the PDU */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length: fixed request header plus one __le16 per CID */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366
l2cap_le_start(struct l2cap_chan * chan)1367 static void l2cap_le_start(struct l2cap_chan *chan)
1368 {
1369 struct l2cap_conn *conn = chan->conn;
1370
1371 if (!smp_conn_security(conn->hcon, chan->sec_level))
1372 return;
1373
1374 if (!chan->psm) {
1375 l2cap_chan_ready(chan);
1376 return;
1377 }
1378
1379 if (chan->state == BT_CONNECT) {
1380 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1381 l2cap_ecred_connect(chan);
1382 else
1383 l2cap_le_connect(chan);
1384 }
1385 }
1386
l2cap_start_connection(struct l2cap_chan * chan)1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 if (chan->conn->hcon->type == LE_LINK) {
1390 l2cap_le_start(chan);
1391 } else {
1392 l2cap_send_conn_req(chan);
1393 }
1394 }
1395
/* Send an Information Request for the peer's feature mask, once per
 * connection (guarded by L2CAP_INFO_FEAT_MASK_REQ_SENT), and arm the
 * info timer to bound how long we wait for the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1413
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1414 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1415 struct l2cap_chan *chan)
1416 {
1417 /* The minimum encryption key size needs to be enforced by the
1418 * host stack before establishing any L2CAP connections. The
1419 * specification in theory allows a minimum of 1, but to align
1420 * BR/EDR and LE transports, a minimum of 7 is chosen.
1421 *
1422 * This check might also be called for unencrypted connections
1423 * that have no key size requirements. Ensure that the link is
1424 * actually encrypted before enforcing a key size.
1425 */
1426 int min_key_size = hcon->hdev->min_enc_key_size;
1427
1428 /* On FIPS security level, key size must be 16 bytes */
1429 if (chan->sec_level == BT_SECURITY_FIPS)
1430 min_key_size = 16;
1431
1432 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1433 hcon->enc_key_size >= min_key_size);
1434 }
1435
/* Drive the connect procedure for @chan. LE links go straight to the LE
 * path; BR/EDR links must first complete the info req/rsp exchange and
 * pass the channel's security and encryption key-size checks before a
 * connection request is sent.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask not requested yet: ask for it and retry later */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait until the info exchange completes (or times out) */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Too-short encryption key: schedule disconnect instead */
	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1462
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * local stack and the remote @feat_mask; 0 for any other mode.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 mask = l2cap_feat_mask;

	if (!disable_ertm)
		mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	mask &= feat_mask;

	if (mode == L2CAP_MODE_ERTM)
		return mask & L2CAP_FEAT_ERTM;
	if (mode == L2CAP_MODE_STREAMING)
		return mask & L2CAP_FEAT_STREAMING;

	return 0x00;
}
1478
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err recorded. For connected ERTM channels all ERTM timers are stopped
 * first so no retransmission fires during teardown.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1500
1501 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn after the info exchange completes and
 * advance each one: connectionless channels become ready, BT_CONNECT
 * channels (re)attempt their connect procedure, and BT_CONNECT2 channels
 * answer the pending incoming connect request.
 *
 * Each channel is processed under its own lock; the safe iterator
 * tolerates channels being removed via l2cap_chan_close().
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices require a mode the peer lacks:
			 * close rather than fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only successful, not-yet-configured channels
			 * proceed to send their first config request.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1577
/* LE-link ready hook: elevate security for outgoing pairing and, as a
 * peripheral, request a connection parameter update if the current
 * interval falls outside the configured min/max range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1610
/* Link-up handler: kick every channel on the newly-ready connection
 * (LE start, fixed-channel ready, or BR/EDR connect), run the LE ready
 * hook, and release any RX frames that were queued while the link was
 * still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR needs the peer feature mask before channels can start */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1646
/* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded a reliable
 * link (FLAG_FORCE_RELIABLE), e.g. after an unrecoverable baseband error.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}
1659
/* Info-request timer expiry: give up waiting for the peer's feature
 * mask, mark the exchange done with whatever we have, and let the
 * waiting channels proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1672
1673 /*
1674 * l2cap_user
1675 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1676 * callback is called during registration. The ->remove callback is called
1677 * during unregistration.
1678 * An l2cap_user object can either be explicitly unregistered or when the
1679 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1680 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1681 * External modules must own a reference to the l2cap_conn object if they intend
1682 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1683 * any time if they don't.
1684 */
1685
/* Register an external l2cap_user on @conn; its ->probe() callback runs
 * during registration. Returns 0 on success, -EINVAL if @user is already
 * registered, -ENODEV if the connection has been torn down, or the
 * error from ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not,
	 * we must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead,
	 * it relies on the parent hci_conn object to be locked. This itself
	 * relies on the hci_dev object to be locked. So we must lock the
	 * hci device here, too. */

	hci_dev_lock(hdev);

	/* Non-empty list node means this user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1723
/* Unregister @user from @conn and invoke its ->remove() callback. Safe to
 * call if the user was never (or already un-) registered; locking mirrors
 * l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	/* list_del_init so a repeated unregister is a harmless no-op */
	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1740
/* Remove and notify every registered user during connection teardown.
 * Uses pop-first rather than list iteration so it stays correct even if
 * a ->remove() callback mutates the users list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1751
/* Tear down the L2CAP state attached to @hcon: flush pending RX, detach
 * all users, close every channel with @err, and release the hci_chan and
 * the connection's own reference. After this, hcon->l2cap_data is NULL.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels: hold a ref across del/close so the chan outlives
	 * its removal from the list while we still touch it.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	/* Drop the reference the hci_conn held on this l2cap_conn */
	l2cap_conn_put(conn);
}
1805
/* kref release callback: drop the hci_conn reference and free the
 * l2cap_conn once the last l2cap_conn_put() lands.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1813
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1820
/* Drop a reference on @conn; frees it via l2cap_conn_free() on the last put. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1826
1827 /* ---- Socket interface ---- */
1828
1829 /* Find socket with psm and source / destination bdaddr.
1830 * Returns closest match.
1831 */
/* Look up a global channel matching @psm, @state (0 = any) and link type,
 * preferring an exact src/dst address match and falling back to the
 * closest wildcard (BDADDR_ANY) match. The returned channel carries a
 * reference taken under chan_list_lock (hold_unless_zero guards against
 * channels concurrently dropping to zero refs); NULL if nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Channel's source address type must suit the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1882
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine, unless the channel already lost its connection. The timer held
 * a channel reference, released here in every path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1903
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine, unless the channel already lost its connection. Releases
 * the reference the timer held on the channel.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1923
/* Streaming-mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately (no acknowledgements or retransmissions in
 * this mode), stamping each with the next TX sequence number and an
 * optional FCS.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges: reqseq stays 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1959
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the TX state machine is in XMIT. Each frame piggybacks an ack
 * (reqseq = buffer_seq), may carry a pending F-bit, and is cloned so the
 * original stays queued for potential retransmission.
 *
 * Returns the number of frames sent, 0 when blocked (remote busy or
 * window full), or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an acknowledgement of everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2026
/* Retransmit every sequence number queued on chan->retrans_list. Each
 * frame's retry count is bumped (disconnecting if max_tx is exceeded),
 * its control field is rebuilt with the current reqseq and any pending
 * F-bit, its FCS recomputed in place, and a writable copy is sent.
 * Aborts (and clears the list) on allocation failure or retry overflow.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		/* Work on a local copy of the control block */
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS (the stored frame already reserves its slot) */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2104
/* Retransmit the single frame the peer requested (control->reqseq),
 * e.g. in response to an SREJ.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2113
/* Retransmit every unacked frame starting at control->reqseq (e.g. after
 * a REJ): rebuild the retransmission list from the TX queue, skipping
 * frames already acked, then send them. A poll bit requests an F-bit on
 * the first retransmission. No-op while the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit (reqseq or later) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) the send head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2147
/* Acknowledge received I-frames using the cheapest applicable
 * mechanism: an RNR when locally busy, an ack piggybacked on pending
 * data, an explicit RR once the window is 3/4 consumed, or a deferred
 * ack via the ack timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending (RNR) */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = 3 * ack_win */
		threshold >>= 2;		/* threshold = ack_win * 3 / 4 */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise ack lazily when the ack timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2197
/* Copy len bytes of user data from msg into skb: the first count
 * bytes go into skb's linear area, the remainder is chained onto
 * skb's frag_list as continuation skbs of at most conn->mtu bytes
 * each (continuation fragments carry no L2CAP header).
 *
 * Returns the number of bytes copied, -EFAULT if the userspace copy
 * fails, or the alloc_skb error.  On failure the caller frees skb,
 * which also releases any fragments already linked into it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy still leaves the
		 * fragment reachable (and thus freeable) from skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2241
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by len bytes of user data (fragmented to
 * conn->mtu).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Data that doesn't fit in the first fragment is chained later */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* The announced length covers the PSM field as well */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2273
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * len bytes of user data, fragmented to conn->mtu.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *hdr;
	struct sk_buff *skb;
	int first_frag_len;
	int err;

	BT_DBG("chan %p len %zu", chan, len);

	/* Anything beyond the first fragment goes on the frag_list */
	first_frag_len = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE),
			       len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, first_frag_len,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Basic-mode header: destination CID and payload length only */
	hdr = skb_put(skb, L2CAP_HDR_SIZE);
	hdr->cid = cpu_to_le16(chan->dcid);
	hdr->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, first_frag_len, skb);
	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2303
/* Build an ERTM/streaming I-frame PDU.  The header contains the
 * enhanced or extended control field (filled in at transmit time) and,
 * for the first segment of a segmented SDU, the 2-byte SDU length.
 * Room for the FCS is reserved via the announced length but the FCS
 * itself is appended later.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on enhanced vs extended mode */
	hlen = __ertm_hdr_size(chan);

	/* Nonzero sdulen marks a SAR start frame carrying the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Announced length covers control, SDU length and FCS fields */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2357
/* Segment an outgoing SDU into I-frame PDUs tagged with SAR markers
 * (UNSEGMENTED, or START/CONTINUE/END) and queue them on seg_queue.
 * Returns 0 on success or a negative error, in which case seg_queue
 * is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU - no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2423
/* Build an LE flow-control (K-frame) PDU: basic L2CAP header plus,
 * for the first segment only, the 2-byte SDU length, followed by len
 * bytes of user data.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Nonzero sdulen marks the first K-frame of a segmented SDU */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Announced length includes the SDU length field, if present */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2466
/* Segment an outgoing SDU into LE flow-control K-frames, each at most
 * remote_mps bytes of payload, and queue them on seg_queue.  The
 * first frame carries the 2-byte SDU length; subsequent frames regain
 * those two bytes for payload.  Returns 0 or a negative error, in
 * which case seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU loses L2CAP_SDULEN_SIZE bytes to the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Continuation frames: no SDU length field, so the
			 * payload budget grows back by its size.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2502
l2cap_le_flowctl_send(struct l2cap_chan * chan)2503 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2504 {
2505 int sent = 0;
2506
2507 BT_DBG("chan %p", chan);
2508
2509 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2510 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2511 chan->tx_credits--;
2512 sent++;
2513 }
2514
2515 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2516 skb_queue_len(&chan->tx_q));
2517 }
2518
/* Send an SDU on a channel, dispatching on channel type and mode.
 * Returns the number of bytes accepted (len) or a negative error.
 * The caller must hold the channel lock.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting; drop the
		 * segments rather than queue to a dead channel.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop accepting data until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2622
/* Send an SREJ S-frame for each missing sequence number between
 * expected_tx_seq and txseq that has not already been received
 * out-of-order, recording each in srej_list, then advance
 * expected_tx_seq past txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already buffered out-of-order in srej_q */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2645
l2cap_send_srej_tail(struct l2cap_chan * chan)2646 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2647 {
2648 struct l2cap_ctrl control;
2649
2650 BT_DBG("chan %p", chan);
2651
2652 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2653 return;
2654
2655 memset(&control, 0, sizeof(control));
2656 control.sframe = 1;
2657 control.super = L2CAP_SUPER_SREJ;
2658 control.reqseq = chan->srej_list.tail;
2659 l2cap_send_sframe(chan, &control);
2660 }
2661
/* Re-send SREJs for every still-missing frame in srej_list, stopping
 * at txseq.  Each popped entry is re-appended, so the list is rotated
 * rather than consumed; the saved head bounds the loop to one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2687
/* Process an incoming ack (reqseq): free every transmitted frame up
 * to (but not including) reqseq from tx_q, update expected_ack_seq,
 * and stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or a duplicate ack - nothing to do */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2719
/* Abandon SREJ-based recovery: drop all out-of-order buffered frames
 * and pending SREJs, rewind expected_tx_seq to the receive position,
 * and return the RX state machine to plain RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2729
/* TX state machine, XMIT state: the channel is transmitting normally
 * (no poll outstanding).  Events either send data, handle local-busy
 * transitions, process acks, or start a poll and move to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember the first never-sent frame */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while locally busy (see l2cap_send_ack) */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We sent an RNR earlier; poll the peer with RR P=1
			 * to resume and wait for its F-bit response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2801
/* TX state machine, WAIT_F state: an S-frame with P=1 is outstanding
 * and we are waiting for the peer's F-bit response.  New data is
 * queued but not transmitted; the monitor timer re-polls the peer and
 * eventually disconnects if no answer arrives.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Re-poll with RR P=1 after clearing local busy */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		/* The peer answered our poll: stop monitoring, rearm the
		 * retransmission timer if frames are still unacked, and
		 * resume normal transmission.
		 */
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Format fixed: was the malformed "0x2.2%x" */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit yet: re-poll up to max_tx times (0 = forever),
		 * then abort the channel.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2878
/* Dispatch a TX event to the handler for the channel's current TX
 * state; events arriving in any other state are dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* else: unknown state, ignore the event */
}
2897
/* Feed a received frame's reqseq/F-bit into the TX state machine so
 * acked frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2904
/* Feed only a received frame's F-bit into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2911
2912 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2913 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2914 {
2915 struct sk_buff *nskb;
2916 struct l2cap_chan *chan;
2917
2918 BT_DBG("conn %p", conn);
2919
2920 list_for_each_entry(chan, &conn->chan_l, list) {
2921 if (chan->chan_type != L2CAP_CHAN_RAW)
2922 continue;
2923
2924 /* Don't send frame to the channel it came from */
2925 if (bt_cb(skb)->l2cap.chan == chan)
2926 continue;
2927
2928 nskb = skb_clone(skb, GFP_KERNEL);
2929 if (!nskb)
2930 continue;
2931 if (chan->ops->recv(chan, nskb))
2932 kfree_skb(nskb);
2933 }
2934 }
2935
/* ---- L2CAP signalling commands ---- */

/* Allocate and build a signalling command PDU: L2CAP header (on the
 * LE or BR/EDR signalling CID), command header, and dlen bytes of
 * payload, fragmented to conn->mtu across a frag_list.  Returns the
 * skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Both headers must fit in the first fragment */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
3002
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Fixed-size values (1/2/4 bytes, little-endian) are returned by
 * value in *val; any other length is returned as a pointer to the
 * option payload.  Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the wire and is not validated
 * against the remaining buffer here - callers must bound the
 * iteration and check *olen before using *val; verify at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3036
/* Append one configuration option at *ptr and advance *ptr past it.
 * val is either the literal value (len 1/2/4, stored little-endian)
 * or a pointer to len bytes to copy.  If fewer than size bytes remain
 * the option is silently dropped - callers size their buffers so this
 * only guards against overflow.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left: drop the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3069
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters.  ERTM uses the negotiated local
 * service settings; streaming mode uses fixed best-effort values.
 * Other modes get no EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3100
/* Deferred-ack work item: if any received frames are still unacked
 * when the ack timer fires, send an explicit RR/RNR.  Drops the
 * channel reference taken when the work was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3120
/* Reset sequence-number state and TX/SAR bookkeeping for a channel.
 * For ERTM mode additionally initialize the RX/TX state machines and
 * allocate the SREJ and retransmission sequence lists.  Returns 0 on
 * success or a negative error (with no lists left allocated).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming and other modes need none of the ERTM machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak srej_list if the second allocation fails */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3156
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask advertises support for it; anything else falls back to basic
 * mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3169
__l2cap_ews_supported(struct l2cap_conn * conn)3170 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3171 {
3172 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3173 }
3174
__l2cap_efs_supported(struct l2cap_conn * conn)3175 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3176 {
3177 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3178 }
3179
/* Fill in the default ERTM retransmission and monitor timeouts
 * (little-endian, as carried in the RFC configuration option).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3186
l2cap_txwin_setup(struct l2cap_chan * chan)3187 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3188 {
3189 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3190 __l2cap_ews_supported(chan->conn)) {
3191 /* use extended control field */
3192 set_bit(FLAG_EXT_CTRL, &chan->flags);
3193 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3194 } else {
3195 chan->tx_win = min_t(u16, chan->tx_win,
3196 L2CAP_DEFAULT_TX_WINDOW);
3197 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3198 }
3199 chan->ack_win = chan->tx_win;
3200 }
3201
l2cap_mtu_auto(struct l2cap_chan * chan)3202 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3203 {
3204 struct hci_conn *conn = chan->conn->hcon;
3205
3206 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3207
3208 /* The 2-DH1 packet has between 2 and 56 information bytes
3209 * (including the 2-byte payload header)
3210 */
3211 if (!(conn->pkt_type & HCI_2DH1))
3212 chan->imtu = 54;
3213
3214 /* The 3-DH1 packet has between 2 and 85 information bytes
3215 * (including the 2-byte payload header)
3216 */
3217 if (!(conn->pkt_type & HCI_3DH1))
3218 chan->imtu = 83;
3219
3220 /* The 2-DH3 packet has between 2 and 369 information bytes
3221 * (including the 2-byte payload header)
3222 */
3223 if (!(conn->pkt_type & HCI_2DH3))
3224 chan->imtu = 367;
3225
3226 /* The 3-DH3 packet has between 2 and 554 information bytes
3227 * (including the 2-byte payload header)
3228 */
3229 if (!(conn->pkt_type & HCI_3DH3))
3230 chan->imtu = 552;
3231
3232 /* The 2-DH5 packet has between 2 and 681 information bytes
3233 * (including the 2-byte payload header)
3234 */
3235 if (!(conn->pkt_type & HCI_2DH5))
3236 chan->imtu = 679;
3237
3238 /* The 3-DH5 packet has between 2 and 1023 information bytes
3239 * (including the 2-byte payload header)
3240 */
3241 if (!(conn->pkt_type & HCI_3DH5))
3242 chan->imtu = 1021;
3243 }
3244
/* Build an L2CAP_CONF_REQ payload for @chan into @data (@data_size bytes).
 * On the very first exchange the channel mode is (re)selected based on the
 * remote feature mask; then MTU, RFC, EFS, EWS and FCS options are appended
 * as applicable for the chosen mode.
 *
 * Returns the number of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any request/response has
	 * been exchanged on this channel.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: mode is mandated by the device,
		 * never fall back.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode the remote's feature mask allows */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "auto": derive it from the enabled
		 * ACL packet types.
		 */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Basic mode is implied; only send an explicit RFC
		 * option when the remote supports ERTM or streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (with extended header,
		 * SDU length and FCS) still fits the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended window size option carries the full tx_win
		 * when extended control fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3370
/* Parse the remote's buffered configure request (chan->conf_req,
 * chan->conf_len) and build the L2CAP_CONF_RSP payload into @data
 * (@data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request cannot be reconciled (mode mismatch after a retry,
 * unsupported EWS, incompatible EFS service type).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option.  Options with a wrong
	 * length are silently skipped (olen checks below).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* Hint bit set: unknown options may be ignored instead
		 * of rejected.
		 */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not supported here;
			 * refuse the connection.
			 */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint options back with
			 * CONF_UNKNOWN.
			 */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode reconciliation only on the first round of negotiation */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Mode is negotiable: adopt what the feature
			 * mask supports.
			 */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Device-mandated mode must match the remote's RFC */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Already rejected once before: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an 'adjustment' to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must be compatible unless one
			 * side is "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits our
			 * link MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	/* scid in the response is the remote's source CID, which we
	 * store as our destination CID.
	 */
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3613
/* Parse a configure response (@rsp, @len bytes) from the remote and
 * build a follow-up L2CAP_CONF_REQ into @data (@size bytes), adopting
 * the adjusted parameter values.  *@result carries the response result
 * code in and may be updated (e.g. to UNACCEPT on a too-small MTU).
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the remote's adjustments cannot be accepted.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A device-mandated mode cannot be renegotiated */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			/* NOTE(review): efs is only initialized above if
			 * the response carried an EFS option; this branch
			 * assumes one was present when EFS is enabled —
			 * confirm against the negotiation flow.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3731
/* Write a bare configure response (header only, no options) for @chan
 * into @data and return the number of bytes used.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	u8 *opts = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* No options appended: length is just the header */
	return opts - (u8 *)data;
}
3746
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3747 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3748 {
3749 struct l2cap_le_conn_rsp rsp;
3750 struct l2cap_conn *conn = chan->conn;
3751
3752 BT_DBG("chan %p", chan);
3753
3754 rsp.dcid = cpu_to_le16(chan->scid);
3755 rsp.mtu = cpu_to_le16(chan->imtu);
3756 rsp.mps = cpu_to_le16(chan->mps);
3757 rsp.credits = cpu_to_le16(chan->rx_credits);
3758 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3759
3760 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3761 &rsp);
3762 }
3763
/* Iterator callback: tally deferred ECRED channels into *@data.
 * Positive count = channels still pending accept; -ECONNREFUSED =
 * at least one channel was refused (sticky).
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Already refused, or an outgoing channel: nothing to count */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: refused */
		*result = -ECONNREFUSED;
	}
}
3784
/* Accumulator for building a deferred ECRED connection response: the
 * on-wire PDU (fixed header plus room for up to L2CAP_ECRED_MAX_CID
 * destination CIDs) and how many CID slots are filled so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];	/* backing space for rsp.dcid[] */
	} __packed pdu;
	int count;	/* number of dcid entries populated */
};
3792
/* Iterator callback: fold one deferred channel into the pending ECRED
 * response in @data.  Successful channels contribute their CID; on a
 * failed overall result the channel is torn down instead.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3813
/* Send the deferred ECRED connection response once every channel that
 * shares @chan's signalling ident has been accepted or refused.  If any
 * channel is still pending accept, the response is postponed.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident == 0 means no response is outstanding */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* Some channels still pending accept: try again later */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3849
/* Send the deferred BR/EDR connection response (success) for @chan and,
 * if not already done, kick off configuration with a CONF_REQ.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];	/* scratch space for the configure request */
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the first configure request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
3874
/* Extract the RFC (and extended window size) parameters from a
 * successful configure response @rsp (@len bytes) and apply them to
 * @chan's ERTM/streaming state.  No-op for other channel modes.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the
		 * window; otherwise use the RFC txwin_size.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3930
l2cap_command_rej(struct l2cap_conn * conn,struct l2cap_cmd_hdr * cmd,u16 cmd_len,u8 * data)3931 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3932 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3933 u8 *data)
3934 {
3935 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3936
3937 if (cmd_len < sizeof(*rej))
3938 return -EPROTO;
3939
3940 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3941 return 0;
3942
3943 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3944 cmd->ident == conn->info_ident) {
3945 cancel_delayed_work(&conn->info_timer);
3946
3947 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3948 conn->info_ident = 0;
3949
3950 l2cap_conn_start(conn);
3951 }
3952
3953 return 0;
3954 }
3955
/* Handle an incoming BR/EDR connection request: look up a listening
 * channel for the PSM, apply security and CID-validity checks, create
 * the new channel and send the connection response (@rsp_code).  May
 * also trigger an information request and/or the first configure
 * request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/refuse later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer PENDING and ask */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel found: nothing to unlock/release */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4086
/* Signalling dispatch entry for L2CAP_CONN_REQ: validate the length
 * and hand off to l2cap_connect() with a standard response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4096
/* Handle an incoming connection response: locate the channel (by our
 * source CID, or by pending ident when scid is 0), then either move it
 * to BT_CONFIG and send the first configure request (success), mark it
 * pending, or tear it down (any other result).
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];	/* scratch space for the configure request */
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the remote's CID must lie in the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Pin the channel so it cannot be freed while we work on it */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a duplicate destination CID */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4174
set_default_fcs(struct l2cap_chan * chan)4175 static inline void set_default_fcs(struct l2cap_chan *chan)
4176 {
4177 /* FCS is enabled only in ERTM or streaming mode, if one or both
4178 * sides request it.
4179 */
4180 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4181 chan->fcs = L2CAP_FCS_NONE;
4182 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4183 chan->fcs = L2CAP_FCS_CRC16;
4184 }
4185
/* Send a successful configure response for an EFS channel whose local
 * PENDING state has now been resolved, marking output configuration as
 * done.  @data is caller-provided scratch space for the response.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4201
/* Send an L2CAP command reject with reason "invalid CID", echoing the
 * offending source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid   = cpu_to_le16(scid),
		.dcid   = cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4213
l2cap_config_req(struct l2cap_conn * conn,struct l2cap_cmd_hdr * cmd,u16 cmd_len,u8 * data)4214 static inline int l2cap_config_req(struct l2cap_conn *conn,
4215 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4216 u8 *data)
4217 {
4218 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4219 u16 dcid, flags;
4220 u8 rsp[64];
4221 struct l2cap_chan *chan;
4222 int len, err = 0;
4223
4224 if (cmd_len < sizeof(*req))
4225 return -EPROTO;
4226
4227 dcid = __le16_to_cpu(req->dcid);
4228 flags = __le16_to_cpu(req->flags);
4229
4230 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4231
4232 chan = l2cap_get_chan_by_scid(conn, dcid);
4233 if (!chan) {
4234 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4235 return 0;
4236 }
4237
4238 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4239 chan->state != BT_CONNECTED) {
4240 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4241 chan->dcid);
4242 goto unlock;
4243 }
4244
4245 /* Reject if config buffer is too small. */
4246 len = cmd_len - sizeof(*req);
4247 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4248 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4249 l2cap_build_conf_rsp(chan, rsp,
4250 L2CAP_CONF_REJECT, flags), rsp);
4251 goto unlock;
4252 }
4253
4254 /* Store config. */
4255 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4256 chan->conf_len += len;
4257
4258 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4259 /* Incomplete config. Send empty response. */
4260 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4261 l2cap_build_conf_rsp(chan, rsp,
4262 L2CAP_CONF_SUCCESS, flags), rsp);
4263 goto unlock;
4264 }
4265
4266 /* Complete config. */
4267 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4268 if (len < 0) {
4269 l2cap_send_disconn_req(chan, ECONNRESET);
4270 goto unlock;
4271 }
4272
4273 chan->ident = cmd->ident;
4274 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4275 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4276 chan->num_conf_rsp++;
4277
4278 /* Reset config buffer. */
4279 chan->conf_len = 0;
4280
4281 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4282 goto unlock;
4283
4284 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4285 set_default_fcs(chan);
4286
4287 if (chan->mode == L2CAP_MODE_ERTM ||
4288 chan->mode == L2CAP_MODE_STREAMING)
4289 err = l2cap_ertm_init(chan);
4290
4291 if (err < 0)
4292 l2cap_send_disconn_req(chan, -err);
4293 else
4294 l2cap_chan_ready(chan);
4295
4296 goto unlock;
4297 }
4298
4299 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4300 u8 buf[64];
4301 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4302 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4303 chan->num_conf_req++;
4304 }
4305
4306 /* Got Conf Rsp PENDING from remote side and assume we sent
4307 Conf Rsp PENDING in the code above */
4308 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4309 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4310
4311 /* check compatibility */
4312
4313 /* Send rsp for BR/EDR channel */
4314 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4315 }
4316
4317 unlock:
4318 l2cap_chan_unlock(chan);
4319 l2cap_chan_put(chan);
4320 return err;
4321 }
4322
/* Handle an incoming L2CAP Configure Response (BR/EDR).
 *
 * The channel is looked up by the SCID echoed in the response; an
 * unknown SCID is silently ignored.  Returns 0 normally, -EPROTO if
 * the PDU is shorter than the fixed response header, or a negative
 * error from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked and referenced; the
	 * matching unlock/put happens at the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our options; record its RFC settings */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Re-negotiate, but only a bounded number of times so two
		 * incompatible peers cannot loop forever.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result: fail the channel and disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments follow; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions are configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4429
/* Handle an incoming L2CAP Disconnection Request (BR/EDR).
 *
 * The peer identifies the channel by its DCID (our SCID).  Unknown
 * CIDs get a Command Reject; otherwise a Disconnection Response is
 * sent and the channel is torn down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our source CID; lookup locks and refs chan */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4468
/* Handle an incoming L2CAP Disconnection Response (BR/EDR).
 *
 * Completes a disconnect we initiated: the channel is looked up by the
 * echoed SCID and deleted, but only if it is actually waiting in
 * BT_DISCONN state.  Returns -EPROTO on a malformed PDU, 0 otherwise.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct l2cap_chan *chan;
	u16 dcid, scid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and referenced */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Ignore stray responses for channels not being disconnected */
	if (chan->state == BT_DISCONN) {
		l2cap_chan_del(chan, 0);
		chan->ops->close(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4505
/* Handle an incoming L2CAP Information Request (BR/EDR).
 *
 * Answers feature-mask and fixed-channel queries from local state and
 * reports NOTSUPP for any other information type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		/* Header plus a 32-bit feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		/* Header plus an 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4553
/* Handle an incoming L2CAP Information Response (BR/EDR).
 *
 * Part of connection-setup feature discovery: caches the remote
 * feature mask and fixed channel map on the connection, then resumes
 * any channels waiting in l2cap_conn_start() once discovery finishes.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused: treat discovery as done with defaults */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			/* No fixed-channel support: discovery is complete */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4616
/* Handle an LE Connection Parameter Update Request from the peripheral.
 *
 * Only valid when we are the central.  Validates the requested
 * parameters, answers with accept/reject, and applies accepted values
 * to the link (also notifying the management interface).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 interval_min, interval_max, latency, timeout;
	int err;

	/* Only the central can be asked to change the parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	interval_min = __le16_to_cpu(req->min);
	interval_max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	timeout = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       interval_min, interval_max, latency, timeout);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(interval_min, interval_max, latency,
				    timeout);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, interval_min,
						interval_max, latency,
						timeout);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, interval_min, interval_max,
				    latency, timeout);
	}

	return 0;
}
4666
/* Handle the LE Credit Based Connection Response to a request we sent.
 *
 * The channel is matched on the pending command identifier.  On
 * success the remote's dcid/mtu/mps/credits are adopted; on a
 * security-related refusal the request will be retried at a higher
 * security level; anything else tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane MTU/MPS values and a
	 * DCID inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that collides with an existing channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security and retry once the link is upgraded */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4746
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 or a handler error; a negative return makes the caller
 * (l2cap_sig_channel) send a Command Reject.  Note that only some
 * handlers propagate their return value; the rest are fire-and-forget.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply reflects the payload back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4805
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the PSM, the requested SCID and our security requirements,
 * creates a new channel from the matching listener, and replies with
 * an LE Credit Based Connection Response (unless setup is deferred to
 * userspace, in which case the response is sent later).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE data channel MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Link security must satisfy the listener's requirement */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4943
/* Handle an incoming LE Flow Control Credit packet.
 *
 * Adds the peer's newly granted credits to the channel's TX budget and
 * resumes any transmission stalled on credits.  A peer pushing the
 * total past LE_FLOWCTL_MAX_CREDITS violates the spec and gets the
 * channel disconnected.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup returns the channel locked and referenced */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4990
/* Handle an incoming L2CAP Enhanced Credit Based Connection Request,
 * which may open up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * A single response PDU carries the overall result plus one DCID per
 * requested SCID (0x0000 for each SCID that could not be allocated).
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Zero the response PDU before any error path can jump to the
	 * response label: otherwise an early rejection would transmit
	 * the mtu/mps/credits fields as uninitialized stack bytes,
	 * leaking kernel stack contents to the remote peer.
	 */
	memset(&pdu, 0, sizeof(pdu));

	/* The SCID list must be a whole number of 16-bit entries */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Link security must satisfy the listener's requirement */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to "refused" (0x0000) for this slot */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Setup deferred to userspace: respond later instead */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
5144
/* Handle an L2CAP Enhanced Credit Based Connection Response.
 *
 * Walks every channel still pending on this command identifier; each
 * one consumes the next DCID from the response list and is either
 * brought up, scheduled for a security-upgraded retry, or deleted.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks the remaining bytes of the DCID list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still waiting on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Re-lookup guaranteed non-NULL by the check above */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security and retry after the upgrade */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5254
/* Handle an L2CAP Credit Based Connection Reconfigure Request, which
 * updates the MTU/MPS of one or more existing enhanced-credit channels.
 *
 * Always answers with a Reconfigure Response unless the PDU itself is
 * malformed (zero SCID entry), in which case -EPROTO is returned and
 * the caller sends a Command Reject.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The SCID list after the fixed header must be a whole number of
	 * 16-bit entries.  The subtraction must be parenthesized: '%'
	 * binds tighter than '-', so without parentheses the test
	 * degenerates to 'cmd_len != 0' and every well-formed request
	 * would be rejected.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5325
/* Handle an L2CAP Credit Based Connection Reconfigure Response.
 *
 * Only the result field is consumed, so the generic ecred connection
 * response layout is reused for parsing.  On a non-zero (failure)
 * result every channel still pending on this command identifier is
 * torn down.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the CPU-endian value, not the raw __le16 wire field */
	BT_DBG("result 0x%4.4x", result);

	/* A zero result means success; nothing to clean up */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
5353
/* Handle an LE Command Reject: tear down any channel whose pending
 * request used the rejected command identifier.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	/* Take a reference, unless the channel is already going away */
	if (chan)
		chan = l2cap_chan_hold_unless_zero(chan);

	if (chan) {
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, ECONNREFUSED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	return 0;
}
5380
/* Dispatch one LE signaling command to its handler.
 *
 * Returns 0 or a handler error; a negative return makes the caller
 * (l2cap_le_sig_channel) send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5443
/* Process a PDU on the LE signaling channel.
 *
 * An LE signaling frame carries exactly one command, so the command
 * length must match the remaining skb exactly.  Malformed frames are
 * dropped; a handler failure triggers a Command Reject.  The skb is
 * always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* A zero ident is reserved and never valid on the wire */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5484
/* Send a "command not understood" reject for the given identifier */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5492
/* Process a PDU on the BR/EDR signaling channel.
 *
 * Unlike LE, a BR/EDR signaling frame may carry several commands
 * back-to-back; each one is dispatched in turn.  Malformed commands
 * and trailing garbage each get a Command Reject.  The skb is always
 * consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror the frame to raw sockets before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* A zero ident is reserved; a length running past the
		 * frame means the command (and anything after it) is
		 * corrupted.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			/* Skip as much of the bad command as exists */
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Leftover bytes too short for a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5540
/* Verify the FCS (CRC-16) trailer on a received ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header plus the payload.  The header has
 * already been pulled from @skb, so it is recovered by backing up
 * hdr_size bytes from skb->data.
 *
 * Returns 0 when the FCS matches or FCS is not in use, -EBADMSG on a
 * mismatch.  On success with CRC16 in use, the FCS trailer has been
 * trimmed from @skb as a side effect.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the (already pulled) header longer */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the frame; the trimmed bytes remain
		 * readable just past the new skb->len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5561
/* Respond to a poll (P=1) from the remote: flush pending I-frames and
 * make sure a frame carrying the F-bit goes out.  If locally busy, the
 * F-bit rides on an RNR; if no I-frame consumed it, fall back to an RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: advertise it with an RNR carrying the F-bit */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy state; restart the retransmission
	 * timer if frames are still awaiting acknowledgment.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5595
/* Append @new_frag to @skb's frag_list and update @skb's aggregate
 * accounting fields.  @last_frag caches the current list tail so
 * successive appends stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5614
/* Feed one ERTM/streaming I-frame into SDU reassembly according to its
 * SAR (segmentation and reassembly) marking.
 *
 * On success, ownership of @skb passes to either the channel's recv
 * callback or the partially assembled chan->sdu.  On error, @skb (if
 * still owned here) and any partial SDU are freed and the reassembly
 * state is reset.
 *
 * Returns 0 on success, or a negative errno: -EINVAL on SAR protocol
 * violations, -EMSGSIZE when the SDU exceeds the incoming MTU, or the
 * recv callback's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* A start fragment carries a 2-byte total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame (if still owned here) and any partial
		 * SDU, then reset reassembly state.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5699
/* Re-segment pending outgoing data after a channel move completes
 * (called from the move-completion paths).  Not implemented yet;
 * currently a successful no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5705
l2cap_chan_busy(struct l2cap_chan * chan,int busy)5706 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5707 {
5708 u8 event;
5709
5710 if (chan->mode != L2CAP_MODE_ERTM)
5711 return;
5712
5713 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5714 l2cap_tx(chan, NULL, NULL, event);
5715 }
5716
/* Deliver buffered out-of-order I-frames from the SREJ queue, in
 * sequence, until a gap is found, an error occurs, or local busy is
 * entered.  Returns to the RECV state (and acks) once the queue has
 * fully drained.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-sequence frame hasn't arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5750
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer selectively rejected, with poll/final bit bookkeeping so the
 * same frame is not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A reqseq equal to next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: the retransmission must carry the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember this SREJ so an F=1 duplicate can be ignored */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F=1 SREJ repeats one
			 * already acted upon (CONN_SREJ_ACT set for the
			 * same reqseq).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5808
/* Handle a received REJ S-frame: retransmit all unacked I-frames
 * starting at reqseq, with poll/final bit bookkeeping so the same REJ
 * is not acted on twice.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A reqseq equal to next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answering our poll: retransmit only if this REJ
		 * was not already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5845
/* Classify a received txseq relative to the expected sequence number,
 * the ack window, and any outstanding SREJ state.  Returns one of the
 * L2CAP_TXSEQ_* classifications that drive the rx state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only apply while SREJs are outstanding */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5931
/* RECV-state handler of the ERTM rx state machine: deliver in-sequence
 * I-frames, transition to SREJ_SENT on a sequence gap, and process
 * RR/RNR/REJ/SREJ supervisory events.
 *
 * @skb is freed at the end unless it was delivered or queued
 * (tracked via skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 * l2cap_reassemble_sdu
			 * chan->ops->recv == l2cap_sock_recv_cb
			 * __sock_queue_rcv_skb
			 * Another thread calls:
			 * bt_sock_recvmsg
			 * skb_recv_datagram
			 * skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				/* F=1 answers our poll; retransmit unless a
				 * REJ was already acted on.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the ack state advances */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * was already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote recovered from busy: rearm the
			 * retransmission timer if frames are unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote is busy: stop retransmitting until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6083
/* SREJ_SENT-state handler of the ERTM rx state machine: missing frames
 * have been selectively rejected, so arriving I-frames are buffered in
 * srej_q and delivered in order once the gaps fill in.
 *
 * @skb is freed at the end unless it was queued (tracked via
 * skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived; drop it from
			 * the SREJ list and try to drain the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * was already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6226
/* Complete a channel move: return the receiver to its normal RECV
 * state, adopt the (possibly changed) link MTU, and re-segment any
 * pending outgoing data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->conn->mtu = chan->conn->hcon->mtu;

	return l2cap_resegment(chan);
}
6236
/* WAIT_P receive state: waiting for the remote's poll (P=1) after a
 * channel move.  Only frames with the P-bit set are acceptable; the
 * transmit side is rewound to the peer's reqseq before resuming.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame reaching here with P=1 is a protocol violation;
	 * only S-frames carry the P-bit.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6274
/* WAIT_F receive state: waiting for the remote's final (F=1) response
 * to our poll during a channel move.  Rewind the transmit state to the
 * peer's reqseq, adopt the new link MTU, then process the frame in the
 * normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6308
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6309 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6310 {
6311 /* Make sure reqseq is for a packet that has been sent but not acked */
6312 u16 unacked;
6313
6314 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6315 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6316 }
6317
/* Top-level ERTM receive dispatcher: validate the frame's reqseq and
 * hand the frame to the handler for the current receive state.
 *
 * @skb is consumed by the state handlers (or by the caller on error
 * paths before this point).  Returns 0 on success or a negative errno
 * propagated from the state handler.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		/* An ack outside the unacked window is unrecoverable.
		 * (The old message was missing its closing parenthesis.)
		 */
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6354
/* Streaming-mode receive: deliver in-sequence frames and silently
 * discard everything else (streaming mode has no retransmission).
 *
 * @skb is consumed; always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 * l2cap_reassemble_sdu
	 * chan->ops->recv == l2cap_sock_recv_cb
	 * __sock_queue_rcv_skb
	 * Another thread calls:
	 * bt_sock_recvmsg
	 * skb_recv_datagram
	 * skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop the frame and any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize the receiver to the frame just processed */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6406
/* Receive entry point for ERTM/streaming data frames on a channel:
 * unpack the control field, verify FCS and payload length, then
 * dispatch I-frames to the rx state machine and S-frames according to
 * their supervisory type.
 *
 * @skb is consumed; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length prefix and FCS trailer */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* A payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to veto the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to rx state machine events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6499
/* Top up the peer's transmit credits on an LE/ECRED channel: send an
 * LE Flow Control Credit packet covering the difference between the
 * credits the peer currently holds and the target computed by
 * l2cap_le_rx_credits().
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Nothing to return if the peer already holds enough credits */
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6522
/* Record the channel's receive-buffer availability as reported by the
 * channel owner and, once connected, return credits to the peer
 * accordingly.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	if (chan->rx_avail == rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6535
/* Hand a fully reassembled LE/ECRED SDU to the channel owner and
 * replenish the peer's credits once reception is confirmed.
 *
 * Returns the recv callback's result; on a fatal queueing failure the
 * channel is disconnected.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* rx_avail == -1 means the owner imposes no buffer limit, so a
	 * recv failure in that case is not treated as fatal.
	 */
	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6556
/* Receive one LE/ECRED (credit-based flow control) PDU: account for a
 * consumed credit, then either deliver a complete SDU or accumulate
 * the fragment into chan->sdu.
 *
 * Returns a negative errno only from the early credit/MTU checks,
 * before @skb ownership is taken (the caller then frees @skb).  After
 * that point all errors are handled internally and 0 is returned to
 * avoid a double free in the caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding a credit: protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* Negative return: @skb is left for the caller to free */
	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* The first PDU of an SDU carries a 2-byte total length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Segmented SDU: keep the first fragment as chan->sdu */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment is now owned by chan->sdu; don't free it below */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* Drop the frame (if still owned here) and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6661
/* Dispatch an incoming data frame to the channel identified by @cid and
 * route it according to the channel's operating mode. Consumes @skb on
 * every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel comes back locked and referenced; the
	 * done/drop paths below release both.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Negative return means the skb was NOT consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* Release the lock and reference taken by l2cap_get_chan_by_scid() */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6727
/* Deliver a connectionless (G-frame) packet to the global channel
 * listening on @psm. Only valid on BR/EDR (ACL) links. Consumes @skb
 * unless the channel's recv() takes ownership (returns 0).
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Lookup returns a referenced channel; put on all exit paths */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Oversized packets are silently dropped */
	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6768
/* Parse the L2CAP basic header of a complete frame and dispatch it to
 * the signaling, connectionless or data channel handler. Consumes @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	/* Queue frames arriving before the link is fully established;
	 * they are replayed later by process_pending_rx().
	 */
	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh still points at the header data after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the 2-byte PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6823
process_pending_rx(struct work_struct * work)6824 static void process_pending_rx(struct work_struct *work)
6825 {
6826 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6827 pending_rx_work);
6828 struct sk_buff *skb;
6829
6830 BT_DBG("");
6831
6832 mutex_lock(&conn->lock);
6833
6834 while ((skb = skb_dequeue(&conn->pending_rx)))
6835 l2cap_recv_frame(conn, skb);
6836
6837 mutex_unlock(&conn->lock);
6838 }
6839
/* Look up or create the L2CAP connection state attached to @hcon.
 * Returns the existing conn when one is already present; otherwise
 * allocates a new one together with its HCI channel. Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold the hci_conn for the lifetime of the l2cap_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the BR/EDR SMP fixed channel only when LE is enabled
	 * and secure connections (or the debug force flag) are available.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	/* Frames received before BT_CONNECTED are parked here and later
	 * drained by process_pending_rx().
	 */
	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6891
is_valid_psm(u16 psm,u8 dst_type)6892 static bool is_valid_psm(u16 psm, u8 dst_type)
6893 {
6894 if (!psm)
6895 return false;
6896
6897 if (bdaddr_type_is_le(dst_type))
6898 return (psm <= 0x00ff);
6899
6900 /* PSM must be odd and lsb of upper byte must be 0 */
6901 return ((psm & 0x0101) == 0x0001);
6902 }
6903
/* Iteration context for l2cap_chan_list() in l2cap_chan_connect():
 * counts deferred ECRED channels matching @chan's owner and PSM.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded) */
	struct pid *pid;		/* owner PID from ops->get_peer_pid() */
	int count;			/* matching channels found so far */
};
6909
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)6910 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6911 {
6912 struct l2cap_chan_data *d = data;
6913 struct pid *pid;
6914
6915 if (chan == d->chan)
6916 return;
6917
6918 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6919 return;
6920
6921 pid = chan->ops->get_peer_pid(chan);
6922
6923 /* Only count deferred channels with the same PID/PSM */
6924 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6925 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6926 return;
6927
6928 d->count++;
6929 }
6930
/* Initiate an outgoing L2CAP channel connection to @dst/@dst_type,
 * using PSM @psm (connection oriented) and/or fixed CID @cid. Creates
 * or reuses the underlying ACL/LE link, validates channel mode and
 * state, adds the channel to the connection and starts the connect
 * procedure. Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* Route via the adapter matching the channel's source address */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a PSM or CID; everything
	 * else needs at least one valid identifier.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are unsupported or disabled by module params */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly as peripheral
		 * (HCI_ROLE_SLAVE); otherwise connect via passive scan.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Refuse a fixed CID already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link may already be up (e.g. reused ACL); start immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7117
/* Send an L2CAP_ECRED_RECONF_REQ advertising the channel's current
 * MTU/MPS for its single source CID.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	/* Wire format: request header followed by exactly one SCID */
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid = cpu_to_le16(chan->scid);

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
7135
/* Raise the channel's receive MTU to @mtu and notify the peer via an
 * ECRED reconfigure request. Shrinking the MTU is rejected with
 * -EINVAL.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* Reconfiguration may only grow the MTU */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7149
7150 /* ---- L2CAP interface with lower layer (HCI) ---- */
7151
/* Incoming connection indication from HCI: scan the global channel
 * list for listeners and report the accepted link modes. Listeners
 * bound to this adapter's exact address take precedence over wildcard
 * (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct l2cap_chan *c;
	int exact = 0, lm_exact = 0, lm_any = 0;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Bound to this adapter's own address */
			lm_exact |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm_exact |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm_any |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm_any |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm_exact : lm_any;
}
7180
7181 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7182 * from an existing channel in the list or from the beginning of the
7183 * global list (by passing NULL as first parameter).
7184 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May return NULL if the channel is already being freed */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7216
/* HCI connect-complete callback: tear down L2CAP state on failure, or
 * set up the l2cap_conn and spawn server instances for all listening
 * fixed channels matching the new link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Clone a per-connection channel from the listener */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
	next:
		/* Advance before dropping the reference held on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7277
l2cap_disconn_ind(struct hci_conn * hcon)7278 int l2cap_disconn_ind(struct hci_conn *hcon)
7279 {
7280 struct l2cap_conn *conn = hcon->l2cap_data;
7281
7282 BT_DBG("hcon %p", hcon);
7283
7284 if (!conn)
7285 return HCI_ERROR_REMOTE_USER_TERM;
7286 return conn->disc_reason;
7287 }
7288
/* HCI disconnect-complete callback: tear down the L2CAP connection for
 * ACL and LE links; other link types carry no L2CAP traffic.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);

		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7298
/* React to an encryption state change on a connection-oriented channel:
 * medium security channels get a grace timer while encryption is off;
 * high/FIPS channels are closed outright when encryption drops.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt == 0x00)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (encrypt == 0x00)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7315
/* HCI security (authentication/encryption) change callback: walk every
 * channel on the connection and advance, block or respond according to
 * the channel state and the outcome in @status/@encrypt.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Adopt the link's security level once encryption is up */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with a connect request still in flight */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: proceed, or arm the disconnect
			 * timer when it failed or the key is too short.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after accepting */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7402
/* Append up to @len bytes of @skb into conn->rx_skb, allocating the
 * reassembly buffer on first use (with @len as its capacity). Advances
 * @skb past the copied bytes and decrements conn->rx_len. Returns the
 * number of bytes copied, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Preserve the arrival timestamp of the first fragment */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7427
/* Complete the 2-byte L2CAP length field in conn->rx_skb from @skb and
 * size the reassembly buffer to the full expected frame. If the buffer
 * allocated so far is too small it is reallocated to the exact length.
 * Returns bytes consumed (or still-short count), or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the old
	 * buffer (holding only the length bytes) is drained into it.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7462
/* Discard any partially reassembled frame and reset the rx state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7469
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7470 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7471 {
7472 if (!c)
7473 return NULL;
7474
7475 BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7476
7477 if (!kref_get_unless_zero(&c->ref))
7478 return NULL;
7479
7480 return c;
7481 }
7482
/* HCI entry point for incoming ACL data. Reassembles L2CAP frames from
 * ACL start/continuation fragments (conn->rx_skb / conn->rx_len) and
 * passes complete frames to l2cap_recv_frame(). Consumes @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* May still fail if the conn is already on its way down */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is pending means the
		 * previous frame was truncated somewhere.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7617
/* Callbacks registered with the HCI core in l2cap_init(): notify L2CAP
 * of link connect, disconnect and security state changes.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7624
l2cap_debugfs_show(struct seq_file * f,void * p)7625 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7626 {
7627 struct l2cap_chan *c;
7628
7629 read_lock(&chan_list_lock);
7630
7631 list_for_each_entry(c, &chan_list, global_l) {
7632 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7633 &c->src, c->src_type, &c->dst, c->dst_type,
7634 c->state, __le16_to_cpu(c->psm),
7635 c->scid, c->dcid, c->imtu, c->omtu,
7636 c->sec_level, c->mode);
7637 }
7638
7639 read_unlock(&chan_list_lock);
7640
7641 return 0;
7642 }
7643
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Dentry of the "l2cap" file created under bt_debugfs */
static struct dentry *l2cap_debugfs;
7647
l2cap_init(void)7648 int __init l2cap_init(void)
7649 {
7650 int err;
7651
7652 err = l2cap_init_sockets();
7653 if (err < 0)
7654 return err;
7655
7656 hci_register_cb(&l2cap_cb);
7657
7658 if (IS_ERR_OR_NULL(bt_debugfs))
7659 return 0;
7660
7661 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7662 NULL, &l2cap_debugfs_fops);
7663
7664 return 0;
7665 }
7666
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7673
/* Writable module parameters backing the globals declared at the top of
 * this file.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7679