1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 5 Copyright (C) 2010 Google Inc. 6 Copyright (C) 2011 ProFUSION Embedded Systems 7 Copyright (c) 2012 Code Aurora Forum. All rights reserved. 8 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 10 11 This program is free software; you can redistribute it and/or modify 12 it under the terms of the GNU General Public License version 2 as 13 published by the Free Software Foundation; 14 15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 23 24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 26 SOFTWARE IS DISCLAIMED. 27 */ 28 29 /* Bluetooth L2CAP core. 
*/

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/filter.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

/* Non-static so they can be toggled from outside this file */
bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling/ERTM machinery defined later in
 * this file.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);

/* Map an HCI link type plus HCI address type to the corresponding
 * BDADDR_* socket address type.  Anything that is not an LE link is
 * reported as BR/EDR.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* Socket address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

/* Socket address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}

/* ---- L2CAP channels ---- */

/* Walk the connection's channel list for a destination CID match.
 * No locking or reference counting here; callers are expected to hold
 * conn->chan_lock (see l2cap_get_chan_by_dcid()).
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

/* As above, but matching on the source CID */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Lockless lookup by signalling command identifier; callers are
 * expected to hold conn->chan_lock (see l2cap_get_chan_by_ident()).
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

/* Find channel with given command ident.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						   u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* BR/EDR and LE PSM namespaces are separate: never
		 * match a channel whose source address type belongs to
		 * the other transport.
		 */
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

/* Bind @chan to @psm on source address @src, or pick a free dynamic
 * PSM when @psm is zero.  Returns 0 on success, -EADDRINUSE if the
 * requested PSM is already bound, -EINVAL if no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		/* BR/EDR dynamic PSMs are stepped by two (only odd
		 * PSM values are valid); LE uses a dense range.
		 */
		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

/* Register @chan as a fixed channel on source CID @scid */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

/* Pick the first unused dynamic CID on @conn, or 0 when the dynamic
 * range (which differs between LE and BR/EDR links) is exhausted.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

/* Move @chan to @state and notify its owner (err 0) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

/* Report @err to the channel's owner without changing state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the two are mutually exclusive, see
 * __set_monitor_timer()).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

/* Switch from the retransmission timer to the monitor timer */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

/* Linear scan of @head for the skb carrying ERTM tx sequence @seq */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
344 */ 345 346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) 347 { 348 size_t alloc_size, i; 349 350 /* Allocated size is a power of 2 to map sequence numbers 351 * (which may be up to 14 bits) in to a smaller array that is 352 * sized for the negotiated ERTM transmit windows. 353 */ 354 alloc_size = roundup_pow_of_two(size); 355 356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL); 357 if (!seq_list->list) 358 return -ENOMEM; 359 360 seq_list->mask = alloc_size - 1; 361 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 363 for (i = 0; i < alloc_size; i++) 364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; 365 366 return 0; 367 } 368 369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) 370 { 371 kfree(seq_list->list); 372 } 373 374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, 375 u16 seq) 376 { 377 /* Constant-time check for list membership */ 378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; 379 } 380 381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) 382 { 383 u16 seq = seq_list->head; 384 u16 mask = seq_list->mask; 385 386 seq_list->head = seq_list->list[seq & mask]; 387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; 388 389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { 390 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 392 } 393 394 return seq; 395 } 396 397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) 398 { 399 u16 i; 400 401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) 402 return; 403 404 for (i = 0; i <= seq_list->mask; i++) 405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; 406 407 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 409 } 410 411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) 412 { 413 u16 mask = seq_list->mask; 414 415 /* All appends happen in constant time 
*/ 416 417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) 418 return; 419 420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) 421 seq_list->head = seq; 422 else 423 seq_list->list[seq_list->tail & mask] = seq; 424 425 seq_list->tail = seq; 426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; 427 } 428 429 static void l2cap_chan_timeout(struct work_struct *work) 430 { 431 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 432 chan_timer.work); 433 struct l2cap_conn *conn = chan->conn; 434 int reason; 435 436 BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); 437 438 mutex_lock(&conn->chan_lock); 439 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling 440 * this work. No need to call l2cap_chan_hold(chan) here again. 441 */ 442 l2cap_chan_lock(chan); 443 444 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 445 reason = ECONNREFUSED; 446 else if (chan->state == BT_CONNECT && 447 chan->sec_level != BT_SECURITY_SDP) 448 reason = ECONNREFUSED; 449 else 450 reason = ETIMEDOUT; 451 452 l2cap_chan_close(chan, reason); 453 454 chan->ops->close(chan); 455 456 l2cap_chan_unlock(chan); 457 l2cap_chan_put(chan); 458 459 mutex_unlock(&conn->chan_lock); 460 } 461 462 struct l2cap_chan *l2cap_chan_create(void) 463 { 464 struct l2cap_chan *chan; 465 466 chan = kzalloc(sizeof(*chan), GFP_ATOMIC); 467 if (!chan) 468 return NULL; 469 470 skb_queue_head_init(&chan->tx_q); 471 skb_queue_head_init(&chan->srej_q); 472 mutex_init(&chan->lock); 473 474 /* Set default lock nesting level */ 475 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); 476 477 write_lock(&chan_list_lock); 478 list_add(&chan->global_l, &chan_list); 479 write_unlock(&chan_list_lock); 480 481 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); 482 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); 483 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); 484 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); 485 486 chan->state = 
BT_OPEN; 487 488 kref_init(&chan->kref); 489 490 /* This flag is cleared in l2cap_chan_ready() */ 491 set_bit(CONF_NOT_COMPLETE, &chan->conf_state); 492 493 BT_DBG("chan %p", chan); 494 495 return chan; 496 } 497 EXPORT_SYMBOL_GPL(l2cap_chan_create); 498 499 static void l2cap_chan_destroy(struct kref *kref) 500 { 501 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref); 502 503 BT_DBG("chan %p", chan); 504 505 write_lock(&chan_list_lock); 506 list_del(&chan->global_l); 507 write_unlock(&chan_list_lock); 508 509 kfree(chan); 510 } 511 512 void l2cap_chan_hold(struct l2cap_chan *c) 513 { 514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); 515 516 kref_get(&c->kref); 517 } 518 519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) 520 { 521 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); 522 523 if (!kref_get_unless_zero(&c->kref)) 524 return NULL; 525 526 return c; 527 } 528 529 void l2cap_chan_put(struct l2cap_chan *c) 530 { 531 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); 532 533 kref_put(&c->kref, l2cap_chan_destroy); 534 } 535 EXPORT_SYMBOL_GPL(l2cap_chan_put); 536 537 void l2cap_chan_set_defaults(struct l2cap_chan *chan) 538 { 539 chan->fcs = L2CAP_FCS_CRC16; 540 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 541 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; 542 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 543 chan->remote_max_tx = chan->max_tx; 544 chan->remote_tx_win = chan->tx_win; 545 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; 546 chan->sec_level = BT_SECURITY_LOW; 547 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; 548 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; 549 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 550 551 chan->conf_state = 0; 552 set_bit(CONF_NOT_COMPLETE, &chan->conf_state); 553 554 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 555 } 556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); 557 558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) 559 { 560 chan->sdu = NULL; 561 
chan->sdu_last_frag = NULL; 562 chan->sdu_len = 0; 563 chan->tx_credits = tx_credits; 564 /* Derive MPS from connection MTU to stop HCI fragmentation */ 565 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); 566 /* Give enough credits for a full packet */ 567 chan->rx_credits = (chan->imtu / chan->mps) + 1; 568 569 skb_queue_head_init(&chan->tx_q); 570 } 571 572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) 573 { 574 l2cap_le_flowctl_init(chan, tx_credits); 575 576 /* L2CAP implementations shall support a minimum MPS of 64 octets */ 577 if (chan->mps < L2CAP_ECRED_MIN_MPS) { 578 chan->mps = L2CAP_ECRED_MIN_MPS; 579 chan->rx_credits = (chan->imtu / chan->mps) + 1; 580 } 581 } 582 583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 584 { 585 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 586 __le16_to_cpu(chan->psm), chan->dcid); 587 588 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 589 590 chan->conn = conn; 591 592 switch (chan->chan_type) { 593 case L2CAP_CHAN_CONN_ORIENTED: 594 /* Alloc CID for connection-oriented socket */ 595 chan->scid = l2cap_alloc_cid(conn); 596 if (conn->hcon->type == ACL_LINK) 597 chan->omtu = L2CAP_DEFAULT_MTU; 598 break; 599 600 case L2CAP_CHAN_CONN_LESS: 601 /* Connectionless socket */ 602 chan->scid = L2CAP_CID_CONN_LESS; 603 chan->dcid = L2CAP_CID_CONN_LESS; 604 chan->omtu = L2CAP_DEFAULT_MTU; 605 break; 606 607 case L2CAP_CHAN_FIXED: 608 /* Caller will set CID and CID specific MTU values */ 609 break; 610 611 default: 612 /* Raw socket can send/recv signalling messages only */ 613 chan->scid = L2CAP_CID_SIGNALING; 614 chan->dcid = L2CAP_CID_SIGNALING; 615 chan->omtu = L2CAP_DEFAULT_MTU; 616 } 617 618 chan->local_id = L2CAP_BESTEFFORT_ID; 619 chan->local_stype = L2CAP_SERV_BESTEFFORT; 620 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; 621 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; 622 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; 623 chan->local_flush_to = 
L2CAP_EFS_DEFAULT_FLUSH_TO; 624 625 l2cap_chan_hold(chan); 626 627 /* Only keep a reference for fixed channels if they requested it */ 628 if (chan->chan_type != L2CAP_CHAN_FIXED || 629 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) 630 hci_conn_hold(conn->hcon); 631 632 list_add(&chan->list, &conn->chan_l); 633 } 634 635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 636 { 637 mutex_lock(&conn->chan_lock); 638 __l2cap_chan_add(conn, chan); 639 mutex_unlock(&conn->chan_lock); 640 } 641 642 void l2cap_chan_del(struct l2cap_chan *chan, int err) 643 { 644 struct l2cap_conn *conn = chan->conn; 645 646 __clear_chan_timer(chan); 647 648 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, 649 state_to_string(chan->state)); 650 651 chan->ops->teardown(chan, err); 652 653 if (conn) { 654 struct amp_mgr *mgr = conn->hcon->amp_mgr; 655 /* Delete from channel list */ 656 list_del(&chan->list); 657 658 l2cap_chan_put(chan); 659 660 chan->conn = NULL; 661 662 /* Reference was only held for non-fixed channels or 663 * fixed channels that explicitly requested it using the 664 * FLAG_HOLD_HCI_CONN flag. 
665 */ 666 if (chan->chan_type != L2CAP_CHAN_FIXED || 667 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) 668 hci_conn_drop(conn->hcon); 669 670 if (mgr && mgr->bredr_chan == chan) 671 mgr->bredr_chan = NULL; 672 } 673 674 if (chan->hs_hchan) { 675 struct hci_chan *hs_hchan = chan->hs_hchan; 676 677 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan); 678 amp_disconnect_logical_link(hs_hchan); 679 } 680 681 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) 682 return; 683 684 switch (chan->mode) { 685 case L2CAP_MODE_BASIC: 686 break; 687 688 case L2CAP_MODE_LE_FLOWCTL: 689 case L2CAP_MODE_EXT_FLOWCTL: 690 skb_queue_purge(&chan->tx_q); 691 break; 692 693 case L2CAP_MODE_ERTM: 694 __clear_retrans_timer(chan); 695 __clear_monitor_timer(chan); 696 __clear_ack_timer(chan); 697 698 skb_queue_purge(&chan->srej_q); 699 700 l2cap_seq_list_free(&chan->srej_list); 701 l2cap_seq_list_free(&chan->retrans_list); 702 fallthrough; 703 704 case L2CAP_MODE_STREAMING: 705 skb_queue_purge(&chan->tx_q); 706 break; 707 } 708 } 709 EXPORT_SYMBOL_GPL(l2cap_chan_del); 710 711 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, 712 l2cap_chan_func_t func, void *data) 713 { 714 struct l2cap_chan *chan, *l; 715 716 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 717 if (chan->ident == id) 718 func(chan, data); 719 } 720 } 721 722 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, 723 void *data) 724 { 725 struct l2cap_chan *chan; 726 727 list_for_each_entry(chan, &conn->chan_l, list) { 728 func(chan, data); 729 } 730 } 731 732 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, 733 void *data) 734 { 735 if (!conn) 736 return; 737 738 mutex_lock(&conn->chan_lock); 739 __l2cap_chan_list(conn, func, data); 740 mutex_unlock(&conn->chan_lock); 741 } 742 743 EXPORT_SYMBOL_GPL(l2cap_chan_list); 744 745 static void l2cap_conn_update_id_addr(struct work_struct *work) 746 { 747 struct l2cap_conn *conn = container_of(work, struct 
l2cap_conn, 748 id_addr_update_work); 749 struct hci_conn *hcon = conn->hcon; 750 struct l2cap_chan *chan; 751 752 mutex_lock(&conn->chan_lock); 753 754 list_for_each_entry(chan, &conn->chan_l, list) { 755 l2cap_chan_lock(chan); 756 bacpy(&chan->dst, &hcon->dst); 757 chan->dst_type = bdaddr_dst_type(hcon); 758 l2cap_chan_unlock(chan); 759 } 760 761 mutex_unlock(&conn->chan_lock); 762 } 763 764 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) 765 { 766 struct l2cap_conn *conn = chan->conn; 767 struct l2cap_le_conn_rsp rsp; 768 u16 result; 769 770 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) 771 result = L2CAP_CR_LE_AUTHORIZATION; 772 else 773 result = L2CAP_CR_LE_BAD_PSM; 774 775 l2cap_state_change(chan, BT_DISCONN); 776 777 rsp.dcid = cpu_to_le16(chan->scid); 778 rsp.mtu = cpu_to_le16(chan->imtu); 779 rsp.mps = cpu_to_le16(chan->mps); 780 rsp.credits = cpu_to_le16(chan->rx_credits); 781 rsp.result = cpu_to_le16(result); 782 783 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), 784 &rsp); 785 } 786 787 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) 788 { 789 l2cap_state_change(chan, BT_DISCONN); 790 791 __l2cap_ecred_conn_rsp_defer(chan); 792 } 793 794 static void l2cap_chan_connect_reject(struct l2cap_chan *chan) 795 { 796 struct l2cap_conn *conn = chan->conn; 797 struct l2cap_conn_rsp rsp; 798 u16 result; 799 800 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) 801 result = L2CAP_CR_SEC_BLOCK; 802 else 803 result = L2CAP_CR_BAD_PSM; 804 805 l2cap_state_change(chan, BT_DISCONN); 806 807 rsp.scid = cpu_to_le16(chan->dcid); 808 rsp.dcid = cpu_to_le16(chan->scid); 809 rsp.result = cpu_to_le16(result); 810 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 811 812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); 813 } 814 815 void l2cap_chan_close(struct l2cap_chan *chan, int reason) 816 { 817 struct l2cap_conn *conn = chan->conn; 818 819 BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); 
820 821 switch (chan->state) { 822 case BT_LISTEN: 823 chan->ops->teardown(chan, 0); 824 break; 825 826 case BT_CONNECTED: 827 case BT_CONFIG: 828 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 829 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); 830 l2cap_send_disconn_req(chan, reason); 831 } else 832 l2cap_chan_del(chan, reason); 833 break; 834 835 case BT_CONNECT2: 836 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 837 if (conn->hcon->type == ACL_LINK) 838 l2cap_chan_connect_reject(chan); 839 else if (conn->hcon->type == LE_LINK) { 840 switch (chan->mode) { 841 case L2CAP_MODE_LE_FLOWCTL: 842 l2cap_chan_le_connect_reject(chan); 843 break; 844 case L2CAP_MODE_EXT_FLOWCTL: 845 l2cap_chan_ecred_connect_reject(chan); 846 return; 847 } 848 } 849 } 850 851 l2cap_chan_del(chan, reason); 852 break; 853 854 case BT_CONNECT: 855 case BT_DISCONN: 856 l2cap_chan_del(chan, reason); 857 break; 858 859 default: 860 chan->ops->teardown(chan, 0); 861 break; 862 } 863 } 864 EXPORT_SYMBOL(l2cap_chan_close); 865 866 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 867 { 868 switch (chan->chan_type) { 869 case L2CAP_CHAN_RAW: 870 switch (chan->sec_level) { 871 case BT_SECURITY_HIGH: 872 case BT_SECURITY_FIPS: 873 return HCI_AT_DEDICATED_BONDING_MITM; 874 case BT_SECURITY_MEDIUM: 875 return HCI_AT_DEDICATED_BONDING; 876 default: 877 return HCI_AT_NO_BONDING; 878 } 879 break; 880 case L2CAP_CHAN_CONN_LESS: 881 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { 882 if (chan->sec_level == BT_SECURITY_LOW) 883 chan->sec_level = BT_SECURITY_SDP; 884 } 885 if (chan->sec_level == BT_SECURITY_HIGH || 886 chan->sec_level == BT_SECURITY_FIPS) 887 return HCI_AT_NO_BONDING_MITM; 888 else 889 return HCI_AT_NO_BONDING; 890 break; 891 case L2CAP_CHAN_CONN_ORIENTED: 892 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { 893 if (chan->sec_level == BT_SECURITY_LOW) 894 chan->sec_level = BT_SECURITY_SDP; 895 896 if (chan->sec_level == BT_SECURITY_HIGH || 897 chan->sec_level == 
BT_SECURITY_FIPS) 898 return HCI_AT_NO_BONDING_MITM; 899 else 900 return HCI_AT_NO_BONDING; 901 } 902 fallthrough; 903 904 default: 905 switch (chan->sec_level) { 906 case BT_SECURITY_HIGH: 907 case BT_SECURITY_FIPS: 908 return HCI_AT_GENERAL_BONDING_MITM; 909 case BT_SECURITY_MEDIUM: 910 return HCI_AT_GENERAL_BONDING; 911 default: 912 return HCI_AT_NO_BONDING; 913 } 914 break; 915 } 916 } 917 918 /* Service level security */ 919 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator) 920 { 921 struct l2cap_conn *conn = chan->conn; 922 __u8 auth_type; 923 924 if (conn->hcon->type == LE_LINK) 925 return smp_conn_security(conn->hcon, chan->sec_level); 926 927 auth_type = l2cap_get_auth_type(chan); 928 929 return hci_conn_security(conn->hcon, chan->sec_level, auth_type, 930 initiator); 931 } 932 933 static u8 l2cap_get_ident(struct l2cap_conn *conn) 934 { 935 u8 id; 936 937 /* Get next available identificator. 938 * 1 - 128 are used by kernel. 939 * 129 - 199 are reserved. 940 * 200 - 254 are used by utilities like l2ping, etc. 
941 */ 942 943 mutex_lock(&conn->ident_lock); 944 945 if (++conn->tx_ident > 128) 946 conn->tx_ident = 1; 947 948 id = conn->tx_ident; 949 950 mutex_unlock(&conn->ident_lock); 951 952 return id; 953 } 954 955 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, 956 void *data) 957 { 958 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 959 u8 flags; 960 961 BT_DBG("code 0x%2.2x", code); 962 963 if (!skb) 964 return; 965 966 /* Use NO_FLUSH if supported or we have an LE link (which does 967 * not support auto-flushing packets) */ 968 if (lmp_no_flush_capable(conn->hcon->hdev) || 969 conn->hcon->type == LE_LINK) 970 flags = ACL_START_NO_FLUSH; 971 else 972 flags = ACL_START; 973 974 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; 975 skb->priority = HCI_PRIO_MAX; 976 977 hci_send_acl(conn->hchan, skb, flags); 978 } 979 980 static bool __chan_is_moving(struct l2cap_chan *chan) 981 { 982 return chan->move_state != L2CAP_MOVE_STABLE && 983 chan->move_state != L2CAP_MOVE_WAIT_PREPARE; 984 } 985 986 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 987 { 988 struct hci_conn *hcon = chan->conn->hcon; 989 u16 flags; 990 991 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, 992 skb->priority); 993 994 if (chan->hs_hcon && !__chan_is_moving(chan)) { 995 if (chan->hs_hchan) 996 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE); 997 else 998 kfree_skb(skb); 999 1000 return; 1001 } 1002 1003 /* Use NO_FLUSH for LE links (where this is the only option) or 1004 * if the BR/EDR link supports it and flushing has not been 1005 * explicitly requested (through FLAG_FLUSHABLE). 
1006 */ 1007 if (hcon->type == LE_LINK || 1008 (!test_bit(FLAG_FLUSHABLE, &chan->flags) && 1009 lmp_no_flush_capable(hcon->hdev))) 1010 flags = ACL_START_NO_FLUSH; 1011 else 1012 flags = ACL_START; 1013 1014 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); 1015 hci_send_acl(chan->conn->hchan, skb, flags); 1016 } 1017 1018 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) 1019 { 1020 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; 1021 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; 1022 1023 if (enh & L2CAP_CTRL_FRAME_TYPE) { 1024 /* S-Frame */ 1025 control->sframe = 1; 1026 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; 1027 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; 1028 1029 control->sar = 0; 1030 control->txseq = 0; 1031 } else { 1032 /* I-Frame */ 1033 control->sframe = 0; 1034 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; 1035 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; 1036 1037 control->poll = 0; 1038 control->super = 0; 1039 } 1040 } 1041 1042 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) 1043 { 1044 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; 1045 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; 1046 1047 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { 1048 /* S-Frame */ 1049 control->sframe = 1; 1050 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; 1051 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; 1052 1053 control->sar = 0; 1054 control->txseq = 0; 1055 } else { 1056 /* I-Frame */ 1057 control->sframe = 0; 1058 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; 1059 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; 1060 1061 control->poll = 0; 1062 control->super = 0; 1063 } 1064 } 1065 
1066 static inline void __unpack_control(struct l2cap_chan *chan, 1067 struct sk_buff *skb) 1068 { 1069 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 1070 __unpack_extended_control(get_unaligned_le32(skb->data), 1071 &bt_cb(skb)->l2cap); 1072 skb_pull(skb, L2CAP_EXT_CTRL_SIZE); 1073 } else { 1074 __unpack_enhanced_control(get_unaligned_le16(skb->data), 1075 &bt_cb(skb)->l2cap); 1076 skb_pull(skb, L2CAP_ENH_CTRL_SIZE); 1077 } 1078 } 1079 1080 static u32 __pack_extended_control(struct l2cap_ctrl *control) 1081 { 1082 u32 packed; 1083 1084 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; 1085 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; 1086 1087 if (control->sframe) { 1088 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; 1089 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; 1090 packed |= L2CAP_EXT_CTRL_FRAME_TYPE; 1091 } else { 1092 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; 1093 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; 1094 } 1095 1096 return packed; 1097 } 1098 1099 static u16 __pack_enhanced_control(struct l2cap_ctrl *control) 1100 { 1101 u16 packed; 1102 1103 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; 1104 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; 1105 1106 if (control->sframe) { 1107 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; 1108 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; 1109 packed |= L2CAP_CTRL_FRAME_TYPE; 1110 } else { 1111 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; 1112 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; 1113 } 1114 1115 return packed; 1116 } 1117 1118 static inline void __pack_control(struct l2cap_chan *chan, 1119 struct l2cap_ctrl *control, 1120 struct sk_buff *skb) 1121 { 1122 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 1123 put_unaligned_le32(__pack_extended_control(control), 1124 skb->data + L2CAP_HDR_SIZE); 1125 } else { 1126 put_unaligned_le16(__pack_enhanced_control(control), 1127 skb->data + L2CAP_HDR_SIZE); 1128 } 1129 } 1130 1131 
/* Size of the header+control area preceding the payload of an ERTM
 * PDU: extended when FLAG_EXT_CTRL is set, enhanced otherwise.
 */
static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

/* Build a supervisory (S-frame) PDU carrying @control, appending the
 * CRC16 FCS trailer when the channel negotiated it.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers header + control, i.e. everything put so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* Highest HCI priority for supervisory frames */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}

/* Encode and transmit an S-frame, updating the ERTM bookkeeping that
 * an outgoing supervisory frame implies (F-bit, RNR state, ack timer).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No traffic while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Anything but an SREJ acknowledges up to reqseq, so the ack
	 * timer no longer needs to fire.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * whether we are locally busy, acking everything up to buffer_seq.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

/* Non-connection-oriented channels never have a connect pending */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

/* True when both sides advertise the A2MP fixed channel, at least one
 * non-BR/EDR AMP controller is up, and the channel policy prefers AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}

/* Placeholder: EFS parameters are currently always accepted */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

/* Send an L2CAP connection request for @chan on its connection */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn
			  *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

/* Ask the remote to create a channel on AMP controller @amp_id */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

/* Prepare an ERTM channel for a move: stop all timers, reset the
 * retransmission bookkeeping and park the rx/tx state machines until
 * the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of frames already sent at least once;
	 * the walk stops at the first never-sent frame (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

/* Finish a channel move and re-arm the ERTM state machines according
 * to the role we played in the move.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL,
L2CAP_EV_EXPLICIT_POLL); 1351 chan->rx_state = L2CAP_RX_STATE_WAIT_F; 1352 break; 1353 case L2CAP_MOVE_ROLE_RESPONDER: 1354 chan->rx_state = L2CAP_RX_STATE_WAIT_P; 1355 break; 1356 } 1357 } 1358 1359 static void l2cap_chan_ready(struct l2cap_chan *chan) 1360 { 1361 /* The channel may have already been flagged as connected in 1362 * case of receiving data before the L2CAP info req/rsp 1363 * procedure is complete. 1364 */ 1365 if (chan->state == BT_CONNECTED) 1366 return; 1367 1368 /* This clears all conf flags, including CONF_NOT_COMPLETE */ 1369 chan->conf_state = 0; 1370 __clear_chan_timer(chan); 1371 1372 switch (chan->mode) { 1373 case L2CAP_MODE_LE_FLOWCTL: 1374 case L2CAP_MODE_EXT_FLOWCTL: 1375 if (!chan->tx_credits) 1376 chan->ops->suspend(chan); 1377 break; 1378 } 1379 1380 chan->state = BT_CONNECTED; 1381 1382 chan->ops->ready(chan); 1383 } 1384 1385 static void l2cap_le_connect(struct l2cap_chan *chan) 1386 { 1387 struct l2cap_conn *conn = chan->conn; 1388 struct l2cap_le_conn_req req; 1389 1390 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) 1391 return; 1392 1393 if (!chan->imtu) 1394 chan->imtu = chan->conn->mtu; 1395 1396 l2cap_le_flowctl_init(chan, 0); 1397 1398 memset(&req, 0, sizeof(req)); 1399 req.psm = chan->psm; 1400 req.scid = cpu_to_le16(chan->scid); 1401 req.mtu = cpu_to_le16(chan->imtu); 1402 req.mps = cpu_to_le16(chan->mps); 1403 req.credits = cpu_to_le16(chan->rx_credits); 1404 1405 chan->ident = l2cap_get_ident(conn); 1406 1407 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, 1408 sizeof(req), &req); 1409 } 1410 1411 struct l2cap_ecred_conn_data { 1412 struct { 1413 struct l2cap_ecred_conn_req req; 1414 __le16 scid[5]; 1415 } __packed pdu; 1416 struct l2cap_chan *chan; 1417 struct pid *pid; 1418 int count; 1419 }; 1420 1421 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) 1422 { 1423 struct l2cap_ecred_conn_data *conn = data; 1424 struct pid *pid; 1425 1426 if (chan == conn->chan) 1427 return; 1428 
1429 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) 1430 return; 1431 1432 pid = chan->ops->get_peer_pid(chan); 1433 1434 /* Only add deferred channels with the same PID/PSM */ 1435 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || 1436 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) 1437 return; 1438 1439 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) 1440 return; 1441 1442 l2cap_ecred_init(chan, 0); 1443 1444 /* Set the same ident so we can match on the rsp */ 1445 chan->ident = conn->chan->ident; 1446 1447 /* Include all channels deferred */ 1448 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); 1449 1450 conn->count++; 1451 } 1452 1453 static void l2cap_ecred_connect(struct l2cap_chan *chan) 1454 { 1455 struct l2cap_conn *conn = chan->conn; 1456 struct l2cap_ecred_conn_data data; 1457 1458 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) 1459 return; 1460 1461 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) 1462 return; 1463 1464 l2cap_ecred_init(chan, 0); 1465 1466 memset(&data, 0, sizeof(data)); 1467 data.pdu.req.psm = chan->psm; 1468 data.pdu.req.mtu = cpu_to_le16(chan->imtu); 1469 data.pdu.req.mps = cpu_to_le16(chan->mps); 1470 data.pdu.req.credits = cpu_to_le16(chan->rx_credits); 1471 data.pdu.scid[0] = cpu_to_le16(chan->scid); 1472 1473 chan->ident = l2cap_get_ident(conn); 1474 1475 data.count = 1; 1476 data.chan = chan; 1477 data.pid = chan->ops->get_peer_pid(chan); 1478 1479 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); 1480 1481 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, 1482 sizeof(data.pdu.req) + data.count * sizeof(__le16), 1483 &data.pdu); 1484 } 1485 1486 static void l2cap_le_start(struct l2cap_chan *chan) 1487 { 1488 struct l2cap_conn *conn = chan->conn; 1489 1490 if (!smp_conn_security(conn->hcon, chan->sec_level)) 1491 return; 1492 1493 if (!chan->psm) { 1494 l2cap_chan_ready(chan); 1495 return; 1496 } 1497 1498 if (chan->state == 
	    BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}

/* Kick off channel establishment: via AMP discovery, the LE path, or
 * a plain BR/EDR connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

/* Query the remote feature mask once per connection; the response is
 * matched via conn->info_ident and bounded by the info timer.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}

/* Returns true when the link is unencrypted or its encryption key is
 * at least the locally enforced minimum size.
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}

/* Drive connection setup for one channel: LE goes straight to the LE
 * path; BR/EDR waits for the info req/rsp exchange, channel security
 * and an adequate encryption key size before connecting.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}

/* Is @mode supported by both the local and the remote feature mask? */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * @err recorded as the channel error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP fixed channel changes state without a request PDU */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid =
		   cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}

/* ---- L2CAP connections ---- */

/* Walk every channel on @conn and push each one forward in its setup
 * state machine: fixed channels become ready, BT_CONNECT channels get
 * a connection attempt, BT_CONNECT2 channels are answered.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Requested mode unsupported by the remote and we
			 * may not fall back: give up on this channel.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration only starts after a successful
			 * response, and only once per channel.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* LE link became ready: trigger security for outgoing pairing and, as
 * peripheral, request a connection parameter update if needed.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}

/* Link-level "connection ready" hook: request the feature mask on ACL
 * links and push every existing channel forward.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel is managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}

/* Info req/rsp exchange timed out: mark it done with whatever feature
 * information we have and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}

/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */

	hci_dev_lock(hdev);

	/* A user already on a list is registered somewhere else */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);

/* Detach every registered l2cap_user, invoking each remove callback;
 * used while tearing the connection down.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}

/* Tear down an l2cap_conn: cancel pending work, detach users, close
 * every channel with @err and drop the hci_conn's reference to us.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it outlives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}

/* kref release callback for an l2cap_conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);

/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels whose refcount hit zero */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}

/* ERTM monitor timer expired: feed the event into the tx state machine */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel no longer attached to a connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* ERTM retransmission timer expired: feed the event into the tx
 * state machine.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if
	   (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Transmit all queued frames on a streaming-mode channel.  Frames are
 * dequeued and sent immediately; nothing is kept for retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}

/* Transmit as many new I-frames as the remote's tx window allows.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggybacks an ack up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Original skb stays on tx_q for retransmission */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}

/* Retransmit the frames whose sequence numbers are queued on
 * chan->retrans_list, honoring the channel's max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

/* Queue a single sequence number for retransmission and resend */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

/* Queue every unacked frame from control->reqseq up to (but not
 * including) tx_send_head for retransmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

/* Acknowledge received I-frames: send an RNR when locally busy, an RR
 * once enough unacked frames are pending, or let outgoing I-frames
 * piggyback the ack; otherwise (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets acked later by the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}

/* Copy up to @len bytes of user data from @msg into @skb: @count bytes go
 * into the skb's linear area, the remainder into newly allocated frag_list
 * continuation skbs sized by the connection MTU.  Returns bytes copied or
 * a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync with the frags */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * followed by the user payload.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU length for the first segment,
 * payload, and room for an FCS when CRC16 is in use.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb =
chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}

/* Segment an outgoing SDU into I-frame PDUs queued on @seg_queue,
 * marking each with the appropriate SAR value (unsegmented, start,
 * continue, end).  Returns 0 or a negative errno; on error the queue
 * is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead.
	 */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		/* Total SDU length is carried only in the first segment */
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}

/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length on the first segment, then payload.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static
/* Segment an SDU into LE flow-control PDUs on @seg_queue.  The first PDU
 * carries the total SDU length; later PDUs reclaim that space for payload.
 */
int l2cap_segment_le_sdu(struct l2cap_chan *chan,
			 struct sk_buff_head *seg_queue,
			 struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}

/* Transmit queued LE flow-control PDUs, one credit per PDU, until we run
 * out of credits or queued data.
 */
static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
{
	int sent = 0;

	BT_DBG("chan %p", chan);

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
		sent++;
	}

	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
	       skb_queue_len(&chan->tx_q));
}

/* Send user data on @chan, dispatching on channel type and mode.
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down while segmenting */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* No credits left: ask the socket layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);

/* Send an SREJ S-frame for every missing sequence number from
 * expected_tx_seq up to (but not including) @txseq, skipping frames
 * already held in the srej queue, and record each in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}

/* Re-send an SREJ for the most recently requested (tail) sequence number */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}

/* Re-send SREJs for every outstanding sequence number up to @txseq,
 * cycling each entry back onto the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list.
	 */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}

/* Process an incoming ack (reqseq): free every tx_q frame it acknowledges
 * and stop the retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}

/* Abandon an in-progress SREJ recovery and fall back to plain receive */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}

/* ERTM TX state machine handler for the XMIT (normal transmit) state */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We sent RNR while busy; now poll the peer with an
			 * RR P=1 and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}

/* ERTM TX state machine handler for the WAIT_F state: a poll has been
 * sent and we are waiting for a frame with the F-bit set.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* F-bit received: poll answered, back to XMIT */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	/* Dispatch a TX state machine event to the handler for the
	 * channel's current transmit state.
	 */
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}

/* Feed a received reqseq/F-bit pair into the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

/* Feed only a received F-bit into the TX state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb (header + command header +
 * @dlen bytes of @data), using frag_list continuation skbs when the
 * command does not fit in one MTU.  Returns NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling goes on the fixed signalling CID for the link type */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

/* Decode one configuration option at *ptr, advancing *ptr past it.
 * For 1/2/4-byte options *val holds the value; otherwise *val holds a
 * pointer to the raw option payload.  Returns the total option length.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void
**ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	/* Append one configuration option at *ptr and advance *ptr.
	 * For 1/2/4-byte options @val is the value itself; otherwise it is
	 * a pointer to @len bytes to copy.  Silently drops the option if it
	 * would overflow the remaining @size bytes of the buffer.
	 */
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters (ERTM and streaming modes only).
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}

/* Deferred-work handler for the ack timer: send an RR/RNR if any received
 * frames are still unacknowledged, then drop the work's channel reference.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Reset the channel's ERTM/streaming state and, for ERTM, allocate the
 * SREJ and retransmit sequence lists.  Returns 0 or a negative errno.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure of the second */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}

/* Pick @mode if the remote supports it, else fall back to basic mode */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		fallthrough;
	default:
		return L2CAP_MODE_BASIC;
	}
}

/* Extended window size: needs both local A2MP fixed channel and the
 * remote's extended-window feature bit.
 */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}

/* Extended flow spec: needs both local A2MP fixed channel and the
 * remote's extended-flow feature bit.
 */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}

/* Fill in the RFC retransmission/monitor timeouts: derived from the AMP
 * controller's best-effort flush timeout on high-speed links, defaults
 * otherwise.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}

/* Choose the TX window: extended (with extended control fields) when both
 * sides support it and a large window was requested, otherwise clamp to
 * the default window size.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}

/* Pick the largest incoming MTU that fits in a single ACL packet of the
 * best packet type this connection allows (checked from smallest to
 * largest; each value is the packet's payload capacity minus the 2-byte
 * payload header).
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}

/* Build an outgoing Configuration Request for @chan into @data (at most
 * @data_size bytes), choosing the mode and emitting MTU/RFC/EFS/EWS/FCS
 * options as appropriate.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only negotiate the mode on the first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option if the peer
		 * advertises ERTM or streaming support at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a maximally-overheaded frame still
		 * fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Window sizes above the default go in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}

/* Parse a peer's Configuration Request (stored in chan->conf_req) and
 * build the response into @data.  Each option with an unexpected length
 * is ignored; unknown non-hint options are reported back.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) 3628 return -ECONNREFUSED; 3629 set_bit(FLAG_EXT_CTRL, &chan->flags); 3630 set_bit(CONF_EWS_RECV, &chan->conf_state); 3631 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3632 chan->remote_tx_win = val; 3633 break; 3634 3635 default: 3636 if (hint) 3637 break; 3638 result = L2CAP_CONF_UNKNOWN; 3639 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); 3640 break; 3641 } 3642 } 3643 3644 if (chan->num_conf_rsp || chan->num_conf_req > 1) 3645 goto done; 3646 3647 switch (chan->mode) { 3648 case L2CAP_MODE_STREAMING: 3649 case L2CAP_MODE_ERTM: 3650 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 3651 chan->mode = l2cap_select_mode(rfc.mode, 3652 chan->conn->feat_mask); 3653 break; 3654 } 3655 3656 if (remote_efs) { 3657 if (__l2cap_efs_supported(chan->conn)) 3658 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3659 else 3660 return -ECONNREFUSED; 3661 } 3662 3663 if (chan->mode != rfc.mode) 3664 return -ECONNREFUSED; 3665 3666 break; 3667 } 3668 3669 done: 3670 if (chan->mode != rfc.mode) { 3671 result = L2CAP_CONF_UNACCEPT; 3672 rfc.mode = chan->mode; 3673 3674 if (chan->num_conf_rsp == 1) 3675 return -ECONNREFUSED; 3676 3677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3678 (unsigned long) &rfc, endptr - ptr); 3679 } 3680 3681 if (result == L2CAP_CONF_SUCCESS) { 3682 /* Configure output options and let the other side know 3683 * which ones we don't like. 
*/ 3684 3685 if (mtu < L2CAP_DEFAULT_MIN_MTU) 3686 result = L2CAP_CONF_UNACCEPT; 3687 else { 3688 chan->omtu = mtu; 3689 set_bit(CONF_MTU_DONE, &chan->conf_state); 3690 } 3691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); 3692 3693 if (remote_efs) { 3694 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3695 efs.stype != L2CAP_SERV_NOTRAFIC && 3696 efs.stype != chan->local_stype) { 3697 3698 result = L2CAP_CONF_UNACCEPT; 3699 3700 if (chan->num_conf_req >= 1) 3701 return -ECONNREFUSED; 3702 3703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3704 sizeof(efs), 3705 (unsigned long) &efs, endptr - ptr); 3706 } else { 3707 /* Send PENDING Conf Rsp */ 3708 result = L2CAP_CONF_PENDING; 3709 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3710 } 3711 } 3712 3713 switch (rfc.mode) { 3714 case L2CAP_MODE_BASIC: 3715 chan->fcs = L2CAP_FCS_NONE; 3716 set_bit(CONF_MODE_DONE, &chan->conf_state); 3717 break; 3718 3719 case L2CAP_MODE_ERTM: 3720 if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) 3721 chan->remote_tx_win = rfc.txwin_size; 3722 else 3723 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; 3724 3725 chan->remote_max_tx = rfc.max_transmit; 3726 3727 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3728 chan->conn->mtu - L2CAP_EXT_HDR_SIZE - 3729 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); 3730 rfc.max_pdu_size = cpu_to_le16(size); 3731 chan->remote_mps = size; 3732 3733 __l2cap_set_ertm_timeouts(chan, &rfc); 3734 3735 set_bit(CONF_MODE_DONE, &chan->conf_state); 3736 3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3738 sizeof(rfc), (unsigned long) &rfc, endptr - ptr); 3739 3740 if (remote_efs && 3741 test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3742 chan->remote_id = efs.id; 3743 chan->remote_stype = efs.stype; 3744 chan->remote_msdu = le16_to_cpu(efs.msdu); 3745 chan->remote_flush_to = 3746 le32_to_cpu(efs.flush_to); 3747 chan->remote_acc_lat = 3748 le32_to_cpu(efs.acc_lat); 3749 chan->remote_sdu_itime = 3750 le32_to_cpu(efs.sdu_itime); 3751 l2cap_add_conf_opt(&ptr, 
L2CAP_CONF_EFS, 3752 sizeof(efs), 3753 (unsigned long) &efs, endptr - ptr); 3754 } 3755 break; 3756 3757 case L2CAP_MODE_STREAMING: 3758 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3759 chan->conn->mtu - L2CAP_EXT_HDR_SIZE - 3760 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); 3761 rfc.max_pdu_size = cpu_to_le16(size); 3762 chan->remote_mps = size; 3763 3764 set_bit(CONF_MODE_DONE, &chan->conf_state); 3765 3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3767 (unsigned long) &rfc, endptr - ptr); 3768 3769 break; 3770 3771 default: 3772 result = L2CAP_CONF_UNACCEPT; 3773 3774 memset(&rfc, 0, sizeof(rfc)); 3775 rfc.mode = chan->mode; 3776 } 3777 3778 if (result == L2CAP_CONF_SUCCESS) 3779 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 3780 } 3781 rsp->scid = cpu_to_le16(chan->dcid); 3782 rsp->result = cpu_to_le16(result); 3783 rsp->flags = cpu_to_le16(0); 3784 3785 return ptr - data; 3786 } 3787 3788 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, 3789 void *data, size_t size, u16 *result) 3790 { 3791 struct l2cap_conf_req *req = data; 3792 void *ptr = req->data; 3793 void *endptr = data + size; 3794 int type, olen; 3795 unsigned long val; 3796 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 3797 struct l2cap_conf_efs efs; 3798 3799 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); 3800 3801 while (len >= L2CAP_CONF_OPT_SIZE) { 3802 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 3803 if (len < 0) 3804 break; 3805 3806 switch (type) { 3807 case L2CAP_CONF_MTU: 3808 if (olen != 2) 3809 break; 3810 if (val < L2CAP_DEFAULT_MIN_MTU) { 3811 *result = L2CAP_CONF_UNACCEPT; 3812 chan->imtu = L2CAP_DEFAULT_MIN_MTU; 3813 } else 3814 chan->imtu = val; 3815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, 3816 endptr - ptr); 3817 break; 3818 3819 case L2CAP_CONF_FLUSH_TO: 3820 if (olen != 2) 3821 break; 3822 chan->flush_to = val; 3823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, 3824 chan->flush_to, endptr 
- ptr); 3825 break; 3826 3827 case L2CAP_CONF_RFC: 3828 if (olen != sizeof(rfc)) 3829 break; 3830 memcpy(&rfc, (void *)val, olen); 3831 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && 3832 rfc.mode != chan->mode) 3833 return -ECONNREFUSED; 3834 chan->fcs = 0; 3835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3836 (unsigned long) &rfc, endptr - ptr); 3837 break; 3838 3839 case L2CAP_CONF_EWS: 3840 if (olen != 2) 3841 break; 3842 chan->ack_win = min_t(u16, val, chan->ack_win); 3843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3844 chan->tx_win, endptr - ptr); 3845 break; 3846 3847 case L2CAP_CONF_EFS: 3848 if (olen != sizeof(efs)) 3849 break; 3850 memcpy(&efs, (void *)val, olen); 3851 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3852 efs.stype != L2CAP_SERV_NOTRAFIC && 3853 efs.stype != chan->local_stype) 3854 return -ECONNREFUSED; 3855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), 3856 (unsigned long) &efs, endptr - ptr); 3857 break; 3858 3859 case L2CAP_CONF_FCS: 3860 if (olen != 1) 3861 break; 3862 if (*result == L2CAP_CONF_PENDING) 3863 if (val == L2CAP_FCS_NONE) 3864 set_bit(CONF_RECV_NO_FCS, 3865 &chan->conf_state); 3866 break; 3867 } 3868 } 3869 3870 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) 3871 return -ECONNREFUSED; 3872 3873 chan->mode = rfc.mode; 3874 3875 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { 3876 switch (rfc.mode) { 3877 case L2CAP_MODE_ERTM: 3878 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3879 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3880 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3881 if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) 3882 chan->ack_win = min_t(u16, chan->ack_win, 3883 rfc.txwin_size); 3884 3885 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3886 chan->local_msdu = le16_to_cpu(efs.msdu); 3887 chan->local_sdu_itime = 3888 le32_to_cpu(efs.sdu_itime); 3889 chan->local_acc_lat = le32_to_cpu(efs.acc_lat); 3890 chan->local_flush_to = 3891 
le32_to_cpu(efs.flush_to); 3892 } 3893 break; 3894 3895 case L2CAP_MODE_STREAMING: 3896 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3897 } 3898 } 3899 3900 req->dcid = cpu_to_le16(chan->dcid); 3901 req->flags = cpu_to_le16(0); 3902 3903 return ptr - data; 3904 } 3905 3906 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, 3907 u16 result, u16 flags) 3908 { 3909 struct l2cap_conf_rsp *rsp = data; 3910 void *ptr = rsp->data; 3911 3912 BT_DBG("chan %p", chan); 3913 3914 rsp->scid = cpu_to_le16(chan->dcid); 3915 rsp->result = cpu_to_le16(result); 3916 rsp->flags = cpu_to_le16(flags); 3917 3918 return ptr - data; 3919 } 3920 3921 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) 3922 { 3923 struct l2cap_le_conn_rsp rsp; 3924 struct l2cap_conn *conn = chan->conn; 3925 3926 BT_DBG("chan %p", chan); 3927 3928 rsp.dcid = cpu_to_le16(chan->scid); 3929 rsp.mtu = cpu_to_le16(chan->imtu); 3930 rsp.mps = cpu_to_le16(chan->mps); 3931 rsp.credits = cpu_to_le16(chan->rx_credits); 3932 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); 3933 3934 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), 3935 &rsp); 3936 } 3937 3938 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) 3939 { 3940 int *result = data; 3941 3942 if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) 3943 return; 3944 3945 switch (chan->state) { 3946 case BT_CONNECT2: 3947 /* If channel still pending accept add to result */ 3948 (*result)++; 3949 return; 3950 case BT_CONNECTED: 3951 return; 3952 default: 3953 /* If not connected or pending accept it has been refused */ 3954 *result = -ECONNREFUSED; 3955 return; 3956 } 3957 } 3958 3959 struct l2cap_ecred_rsp_data { 3960 struct { 3961 struct l2cap_ecred_conn_rsp rsp; 3962 __le16 scid[L2CAP_ECRED_MAX_CID]; 3963 } __packed pdu; 3964 int count; 3965 }; 3966 3967 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) 3968 { 3969 struct l2cap_ecred_rsp_data *rsp = data; 3970 3971 if 
 (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}

/* Send the deferred ECRED Connection Response once every channel that
 * shares @chan's request ident has either been accepted or refused.
 * Returns early (without sending) while any sibling channel is still
 * pending in BT_CONNECT2.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}

/* Send the deferred BR/EDR Connection Response (or Create Channel
 * Response for AMP channels) and kick off configuration by sending the
 * first Configure Request if one has not been sent yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}

/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and apply them to the channel.  Only
 * meaningful for ERTM and Streaming modes.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request, finish feature discovery and start pending
 * channels anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): look up a listening channel on the requested PSM, run the
 * security/CID validity checks, create the new channel and send the
 * response @rsp_code.  Returns the new channel or NULL on failure.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}

/* Handle a Connection Request on a BR/EDR link: notify mgmt of the
 * (possibly new) device connection, then delegate to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}

/* Handle a Connection Response (or Create Channel Response): look up
 * the matching channel by scid or by request ident, take a reference
 * (guarding against a concurrent teardown), and drive the channel state
 * according to the result code.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Guard against racing channel destruction (refcount may be 0) */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

l2cap_chan_unlock(chan); 4364 l2cap_chan_put(chan); 4365 4366 unlock: 4367 mutex_unlock(&conn->chan_lock); 4368 4369 return err; 4370 } 4371 4372 static inline void set_default_fcs(struct l2cap_chan *chan) 4373 { 4374 /* FCS is enabled only in ERTM or streaming mode, if one or both 4375 * sides request it. 4376 */ 4377 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 4378 chan->fcs = L2CAP_FCS_NONE; 4379 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) 4380 chan->fcs = L2CAP_FCS_CRC16; 4381 } 4382 4383 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, 4384 u8 ident, u16 flags) 4385 { 4386 struct l2cap_conn *conn = chan->conn; 4387 4388 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, 4389 flags); 4390 4391 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 4392 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 4393 4394 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, 4395 l2cap_build_conf_rsp(chan, data, 4396 L2CAP_CONF_SUCCESS, flags), data); 4397 } 4398 4399 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, 4400 u16 scid, u16 dcid) 4401 { 4402 struct l2cap_cmd_rej_cid rej; 4403 4404 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 4405 rej.scid = __cpu_to_le16(scid); 4406 rej.dcid = __cpu_to_le16(dcid); 4407 4408 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4409 } 4410 4411 static inline int l2cap_config_req(struct l2cap_conn *conn, 4412 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4413 u8 *data) 4414 { 4415 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 4416 u16 dcid, flags; 4417 u8 rsp[64]; 4418 struct l2cap_chan *chan; 4419 int len, err = 0; 4420 4421 if (cmd_len < sizeof(*req)) 4422 return -EPROTO; 4423 4424 dcid = __le16_to_cpu(req->dcid); 4425 flags = __le16_to_cpu(req->flags); 4426 4427 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); 4428 4429 chan = l2cap_get_chan_by_scid(conn, dcid); 4430 if (!chan) { 4431 cmd_reject_invalid_cid(conn, 
cmd->ident, dcid, 0); 4432 return 0; 4433 } 4434 4435 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && 4436 chan->state != BT_CONNECTED) { 4437 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, 4438 chan->dcid); 4439 goto unlock; 4440 } 4441 4442 /* Reject if config buffer is too small. */ 4443 len = cmd_len - sizeof(*req); 4444 if (chan->conf_len + len > sizeof(chan->conf_req)) { 4445 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 4446 l2cap_build_conf_rsp(chan, rsp, 4447 L2CAP_CONF_REJECT, flags), rsp); 4448 goto unlock; 4449 } 4450 4451 /* Store config. */ 4452 memcpy(chan->conf_req + chan->conf_len, req->data, len); 4453 chan->conf_len += len; 4454 4455 if (flags & L2CAP_CONF_FLAG_CONTINUATION) { 4456 /* Incomplete config. Send empty response. */ 4457 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 4458 l2cap_build_conf_rsp(chan, rsp, 4459 L2CAP_CONF_SUCCESS, flags), rsp); 4460 goto unlock; 4461 } 4462 4463 /* Complete config. */ 4464 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp)); 4465 if (len < 0) { 4466 l2cap_send_disconn_req(chan, ECONNRESET); 4467 goto unlock; 4468 } 4469 4470 chan->ident = cmd->ident; 4471 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 4472 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) 4473 chan->num_conf_rsp++; 4474 4475 /* Reset config buffer. 
*/ 4476 chan->conf_len = 0; 4477 4478 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) 4479 goto unlock; 4480 4481 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 4482 set_default_fcs(chan); 4483 4484 if (chan->mode == L2CAP_MODE_ERTM || 4485 chan->mode == L2CAP_MODE_STREAMING) 4486 err = l2cap_ertm_init(chan); 4487 4488 if (err < 0) 4489 l2cap_send_disconn_req(chan, -err); 4490 else 4491 l2cap_chan_ready(chan); 4492 4493 goto unlock; 4494 } 4495 4496 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { 4497 u8 buf[64]; 4498 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 4499 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); 4500 chan->num_conf_req++; 4501 } 4502 4503 /* Got Conf Rsp PENDING from remote side and assume we sent 4504 Conf Rsp PENDING in the code above */ 4505 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && 4506 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { 4507 4508 /* check compatibility */ 4509 4510 /* Send rsp for BR/EDR channel */ 4511 if (!chan->hs_hcon) 4512 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); 4513 else 4514 chan->ident = cmd->ident; 4515 } 4516 4517 unlock: 4518 l2cap_chan_unlock(chan); 4519 l2cap_chan_put(chan); 4520 return err; 4521 } 4522 4523 static inline int l2cap_config_rsp(struct l2cap_conn *conn, 4524 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4525 u8 *data) 4526 { 4527 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 4528 u16 scid, flags, result; 4529 struct l2cap_chan *chan; 4530 int len = cmd_len - sizeof(*rsp); 4531 int err = 0; 4532 4533 if (cmd_len < sizeof(*rsp)) 4534 return -EPROTO; 4535 4536 scid = __le16_to_cpu(rsp->scid); 4537 flags = __le16_to_cpu(rsp->flags); 4538 result = __le16_to_cpu(rsp->result); 4539 4540 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, 4541 result, len); 4542 4543 chan = l2cap_get_chan_by_scid(conn, scid); 4544 if (!chan) 4545 return 0; 4546 4547 switch (result) { 4548 case L2CAP_CONF_SUCCESS: 4549 
l2cap_conf_rfc_get(chan, rsp->data, len); 4550 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); 4551 break; 4552 4553 case L2CAP_CONF_PENDING: 4554 set_bit(CONF_REM_CONF_PEND, &chan->conf_state); 4555 4556 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { 4557 char buf[64]; 4558 4559 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 4560 buf, sizeof(buf), &result); 4561 if (len < 0) { 4562 l2cap_send_disconn_req(chan, ECONNRESET); 4563 goto done; 4564 } 4565 4566 if (!chan->hs_hcon) { 4567 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 4568 0); 4569 } else { 4570 if (l2cap_check_efs(chan)) { 4571 amp_create_logical_link(chan); 4572 chan->ident = cmd->ident; 4573 } 4574 } 4575 } 4576 goto done; 4577 4578 case L2CAP_CONF_UNKNOWN: 4579 case L2CAP_CONF_UNACCEPT: 4580 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 4581 char req[64]; 4582 4583 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { 4584 l2cap_send_disconn_req(chan, ECONNRESET); 4585 goto done; 4586 } 4587 4588 /* throw out any old stored conf requests */ 4589 result = L2CAP_CONF_SUCCESS; 4590 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 4591 req, sizeof(req), &result); 4592 if (len < 0) { 4593 l2cap_send_disconn_req(chan, ECONNRESET); 4594 goto done; 4595 } 4596 4597 l2cap_send_cmd(conn, l2cap_get_ident(conn), 4598 L2CAP_CONF_REQ, len, req); 4599 chan->num_conf_req++; 4600 if (result != L2CAP_CONF_SUCCESS) 4601 goto done; 4602 break; 4603 } 4604 fallthrough; 4605 4606 default: 4607 l2cap_chan_set_err(chan, ECONNRESET); 4608 4609 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); 4610 l2cap_send_disconn_req(chan, ECONNRESET); 4611 goto done; 4612 } 4613 4614 if (flags & L2CAP_CONF_FLAG_CONTINUATION) 4615 goto done; 4616 4617 set_bit(CONF_INPUT_DONE, &chan->conf_state); 4618 4619 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 4620 set_default_fcs(chan); 4621 4622 if (chan->mode == L2CAP_MODE_ERTM || 4623 chan->mode == L2CAP_MODE_STREAMING) 4624 err = l2cap_ertm_init(chan); 4625 4626 if 
(err < 0) 4627 l2cap_send_disconn_req(chan, -err); 4628 else 4629 l2cap_chan_ready(chan); 4630 } 4631 4632 done: 4633 l2cap_chan_unlock(chan); 4634 l2cap_chan_put(chan); 4635 return err; 4636 } 4637 4638 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, 4639 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4640 u8 *data) 4641 { 4642 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 4643 struct l2cap_disconn_rsp rsp; 4644 u16 dcid, scid; 4645 struct l2cap_chan *chan; 4646 4647 if (cmd_len != sizeof(*req)) 4648 return -EPROTO; 4649 4650 scid = __le16_to_cpu(req->scid); 4651 dcid = __le16_to_cpu(req->dcid); 4652 4653 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 4654 4655 mutex_lock(&conn->chan_lock); 4656 4657 chan = __l2cap_get_chan_by_scid(conn, dcid); 4658 if (!chan) { 4659 mutex_unlock(&conn->chan_lock); 4660 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); 4661 return 0; 4662 } 4663 4664 l2cap_chan_hold(chan); 4665 l2cap_chan_lock(chan); 4666 4667 rsp.dcid = cpu_to_le16(chan->scid); 4668 rsp.scid = cpu_to_le16(chan->dcid); 4669 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 4670 4671 chan->ops->set_shutdown(chan); 4672 4673 l2cap_chan_del(chan, ECONNRESET); 4674 4675 chan->ops->close(chan); 4676 4677 l2cap_chan_unlock(chan); 4678 l2cap_chan_put(chan); 4679 4680 mutex_unlock(&conn->chan_lock); 4681 4682 return 0; 4683 } 4684 4685 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, 4686 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4687 u8 *data) 4688 { 4689 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 4690 u16 dcid, scid; 4691 struct l2cap_chan *chan; 4692 4693 if (cmd_len != sizeof(*rsp)) 4694 return -EPROTO; 4695 4696 scid = __le16_to_cpu(rsp->scid); 4697 dcid = __le16_to_cpu(rsp->dcid); 4698 4699 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 4700 4701 mutex_lock(&conn->chan_lock); 4702 4703 chan = __l2cap_get_chan_by_scid(conn, scid); 4704 if (!chan) { 4705 
mutex_unlock(&conn->chan_lock); 4706 return 0; 4707 } 4708 4709 l2cap_chan_hold(chan); 4710 l2cap_chan_lock(chan); 4711 4712 if (chan->state != BT_DISCONN) { 4713 l2cap_chan_unlock(chan); 4714 l2cap_chan_put(chan); 4715 mutex_unlock(&conn->chan_lock); 4716 return 0; 4717 } 4718 4719 l2cap_chan_del(chan, 0); 4720 4721 chan->ops->close(chan); 4722 4723 l2cap_chan_unlock(chan); 4724 l2cap_chan_put(chan); 4725 4726 mutex_unlock(&conn->chan_lock); 4727 4728 return 0; 4729 } 4730 4731 static inline int l2cap_information_req(struct l2cap_conn *conn, 4732 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4733 u8 *data) 4734 { 4735 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 4736 u16 type; 4737 4738 if (cmd_len != sizeof(*req)) 4739 return -EPROTO; 4740 4741 type = __le16_to_cpu(req->type); 4742 4743 BT_DBG("type 0x%4.4x", type); 4744 4745 if (type == L2CAP_IT_FEAT_MASK) { 4746 u8 buf[8]; 4747 u32 feat_mask = l2cap_feat_mask; 4748 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 4749 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 4750 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 4751 if (!disable_ertm) 4752 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 4753 | L2CAP_FEAT_FCS; 4754 if (conn->local_fixed_chan & L2CAP_FC_A2MP) 4755 feat_mask |= L2CAP_FEAT_EXT_FLOW 4756 | L2CAP_FEAT_EXT_WINDOW; 4757 4758 put_unaligned_le32(feat_mask, rsp->data); 4759 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), 4760 buf); 4761 } else if (type == L2CAP_IT_FIXED_CHAN) { 4762 u8 buf[12]; 4763 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 4764 4765 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 4766 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 4767 rsp->data[0] = conn->local_fixed_chan; 4768 memset(rsp->data + 1, 0, 7); 4769 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), 4770 buf); 4771 } else { 4772 struct l2cap_info_rsp rsp; 4773 rsp.type = cpu_to_le16(type); 4774 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 4775 
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}

/* Handle an L2CAP Information Response to our own query. Because
 * info req/rsp are not bound to a channel, the ident and the
 * "request done" bit are checked to filter stale or duplicate
 * responses. After the feature mask arrives, a follow-up query for
 * the fixed-channel map may be issued; once all info is gathered,
 * pending channel setup is resumed via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: proceed without feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the map next */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}

/* Handle an AMP Create Channel Request. amp_id 0 (AMP_ID_BREDR)
 * falls back to a normal BR/EDR connect; otherwise the AMP
 * controller id is validated and, on success, the new channel is
 * associated with the high-speed link. Any invalid controller id is
 * answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only meaningful if we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No high-speed link to attach to: reject the CIDs */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links carry their own integrity check; no L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}

/* Send a Move Channel Request for this channel to the given
 * destination AMP controller and arm the move timeout.
 */
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident =
l2cap_get_ident(chan->conn);
	/* Remember the ident so the matching response can be correlated */
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

/* Send a Move Channel Response using the ident saved from the
 * peer's request.
 */
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}

/* Send a Move Channel Confirm for this channel with a fresh ident
 * and arm the move timeout while waiting for the confirm response.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

/* Send an UNCONFIRMED Move Channel Confirm for an icid with no
 * known channel (used when the channel lookup failed but the spec
 * still requires an answer).
 */
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}

/* Send a Move Channel Confirm Response echoing the peer's ident. */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}

/* Drop the channel's references to the high-speed logical link.
 * The actual link release is still a placeholder (see comment).
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}

/* React to a failed logical-link setup: disconnect a channel that
 * never reached BT_CONNECTED, otherwise abort the in-progress move
 * according to our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}

/* Complete channel creation once the AMP logical link is up:
 * attach the link, send the (EFS) config response, and if the
 * peer's config is already done, initialize ERTM and bring the
 * channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}

/* Advance the channel-move state machine after the logical link for
 * a move came up, based on the current move_state and role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if
(test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Receiver busy: defer until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}

/* Call with chan locked */
/* Entry point for logical-link completion notifications: on error
 * run the failure path, otherwise finish either channel creation
 * (channel not yet connected) or a channel move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}

/* Begin moving this channel: toward an AMP when currently on BR/EDR
 * (only if policy prefers AMP), or back to BR/EDR (move_id 0)
 * otherwise.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}

/* Continue channel creation after the physical AMP link attempt.
 * Outgoing (BT_CONNECT) channels either proceed on the AMP or fall
 * back to BR/EDR; incoming channels get a connect response and, on
 * success, the first config request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}

/* As move initiator, prepare the channel and send the Move Channel
 * Request toward the remote AMP controller.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}

/* As move responder, answer based on logical-link readiness.
 * NOTE(review): hchan is currently always NULL (placeholder lookup),
 * so this path always responds NOT_ALLOWED until the amp lookup is
 * wired up.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}

/* Abort a channel move, answering the peer if we were responder,
 * reset the move state, and resume ERTM transmission.
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}

/* Invoke with locked chan */
/* Physical-link completion callback: dispatch to create (channel not
 * yet connected), cancel (failure), or the role-appropriate move
 * continuation.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}

/* Handle a peer Move Channel Request: validate the channel, mode and
 * destination controller, detect move collisions, then start acting
 * as move responder.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves only make sense when we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: answer NOT_ALLOWED directly */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic, ERTM/streaming, non-BR/EDR-pinned channels
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Continue an in-progress move after a SUCCESS/PEND Move Channel
 * Response, advancing the move state machine; any unexpected state
 * aborts the move with an UNCONFIRMED confirm.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time: use the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Handle a failed Move Channel Response: on collision we switch to
 * responder role, otherwise the move is cancelled; either way an
 * UNCONFIRMED confirm is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Handle a Move Channel Response, splitting success/pending from
 * failure handling.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);

	return 0;
}

/* Handle a Move Channel Confirm: commit or roll back the controller
 * id change and always answer with a confirm response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);
	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move rejected: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle a Move Channel Confirm Response: final step of a move
 * initiated by us; commit the controller id and finish the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle an LE Connection Parameter Update Request from a slave:
 * validate the parameters, respond with accept/reject, and on accept
 * apply them to the link and notify the management interface.
 * Only valid when we are the connection master.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}

/* Handle an LE Credit Based Connection Response: on success adopt
 * the peer's parameters and bring the channel up; on an
 * authentication/encryption failure retry at a higher security
 * level; otherwise tear the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the spec minimum for LE CoC MTU/MPS; the dcid must be
	 * in the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use on this conn */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one level and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

/* Dispatch one BR/EDR signaling command to its handler. Unknown
 * opcodes return -EINVAL so the caller can generate a Command
 * Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Create-channel responses share the connect-rsp handler */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply mirrors the payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Handle an LE Credit Based Connection Request: validate PSM, MTU,
 * MPS and scid, find a listening socket, create the new channel and
 * either defer (DEFER_SETUP) or answer immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum MTU/MPS for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);
	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the child channel from the connection and request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: response is sent later by userspace accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}

/* Handle an LE Flow Control Credit packet: add the peer's credits,
 * guarding against overflowing LE_FLOWCTL_MAX_CREDITS, and resume
 * transmission if credits are now available.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		/* Peer violated the credit protocol: disconnect */
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle an Enhanced Credit Based Connection Request (up to
 * L2CAP_ECRED_MAX_CID channels in one PDU). Gated on the
 * enable_ecred module setting.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload after the fixed header must be whole 16-bit scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst,
LE_LINK); 6054 if (!pchan) { 6055 result = L2CAP_CR_LE_BAD_PSM; 6056 goto response; 6057 } 6058 6059 mutex_lock(&conn->chan_lock); 6060 l2cap_chan_lock(pchan); 6061 6062 if (!smp_sufficient_security(conn->hcon, pchan->sec_level, 6063 SMP_ALLOW_STK)) { 6064 result = L2CAP_CR_LE_AUTHENTICATION; 6065 goto unlock; 6066 } 6067 6068 result = L2CAP_CR_LE_SUCCESS; 6069 6070 for (i = 0; i < num_scid; i++) { 6071 u16 scid = __le16_to_cpu(req->scid[i]); 6072 6073 BT_DBG("scid[%d] 0x%4.4x", i, scid); 6074 6075 pdu.dcid[i] = 0x0000; 6076 len += sizeof(*pdu.dcid); 6077 6078 /* Check for valid dynamic CID range */ 6079 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { 6080 result = L2CAP_CR_LE_INVALID_SCID; 6081 continue; 6082 } 6083 6084 /* Check if we already have channel with that dcid */ 6085 if (__l2cap_get_chan_by_dcid(conn, scid)) { 6086 result = L2CAP_CR_LE_SCID_IN_USE; 6087 continue; 6088 } 6089 6090 chan = pchan->ops->new_connection(pchan); 6091 if (!chan) { 6092 result = L2CAP_CR_LE_NO_MEM; 6093 continue; 6094 } 6095 6096 bacpy(&chan->src, &conn->hcon->src); 6097 bacpy(&chan->dst, &conn->hcon->dst); 6098 chan->src_type = bdaddr_src_type(conn->hcon); 6099 chan->dst_type = bdaddr_dst_type(conn->hcon); 6100 chan->psm = psm; 6101 chan->dcid = scid; 6102 chan->omtu = mtu; 6103 chan->remote_mps = mps; 6104 6105 __l2cap_chan_add(conn, chan); 6106 6107 l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); 6108 6109 /* Init response */ 6110 if (!pdu.rsp.credits) { 6111 pdu.rsp.mtu = cpu_to_le16(chan->imtu); 6112 pdu.rsp.mps = cpu_to_le16(chan->mps); 6113 pdu.rsp.credits = cpu_to_le16(chan->rx_credits); 6114 } 6115 6116 pdu.dcid[i] = cpu_to_le16(chan->scid); 6117 6118 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); 6119 6120 chan->ident = cmd->ident; 6121 chan->mode = L2CAP_MODE_EXT_FLOWCTL; 6122 6123 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 6124 l2cap_state_change(chan, BT_CONNECT2); 6125 defer = true; 6126 chan->ops->defer(chan); 6127 } else { 6128 
l2cap_chan_ready(chan); 6129 } 6130 } 6131 6132 unlock: 6133 l2cap_chan_unlock(pchan); 6134 mutex_unlock(&conn->chan_lock); 6135 l2cap_chan_put(pchan); 6136 6137 response: 6138 pdu.rsp.result = cpu_to_le16(result); 6139 6140 if (defer) 6141 return 0; 6142 6143 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, 6144 sizeof(pdu.rsp) + len, &pdu); 6145 6146 return 0; 6147 } 6148 6149 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, 6150 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 6151 u8 *data) 6152 { 6153 struct l2cap_ecred_conn_rsp *rsp = (void *) data; 6154 struct hci_conn *hcon = conn->hcon; 6155 u16 mtu, mps, credits, result; 6156 struct l2cap_chan *chan, *tmp; 6157 int err = 0, sec_level; 6158 int i = 0; 6159 6160 if (cmd_len < sizeof(*rsp)) 6161 return -EPROTO; 6162 6163 mtu = __le16_to_cpu(rsp->mtu); 6164 mps = __le16_to_cpu(rsp->mps); 6165 credits = __le16_to_cpu(rsp->credits); 6166 result = __le16_to_cpu(rsp->result); 6167 6168 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, 6169 result); 6170 6171 mutex_lock(&conn->chan_lock); 6172 6173 cmd_len -= sizeof(*rsp); 6174 6175 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { 6176 u16 dcid; 6177 6178 if (chan->ident != cmd->ident || 6179 chan->mode != L2CAP_MODE_EXT_FLOWCTL || 6180 chan->state == BT_CONNECTED) 6181 continue; 6182 6183 l2cap_chan_lock(chan); 6184 6185 /* Check that there is a dcid for each pending channel */ 6186 if (cmd_len < sizeof(dcid)) { 6187 l2cap_chan_del(chan, ECONNREFUSED); 6188 l2cap_chan_unlock(chan); 6189 continue; 6190 } 6191 6192 dcid = __le16_to_cpu(rsp->dcid[i++]); 6193 cmd_len -= sizeof(u16); 6194 6195 BT_DBG("dcid[%d] 0x%4.4x", i, dcid); 6196 6197 /* Check if dcid is already in use */ 6198 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) { 6199 /* If a device receives a 6200 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an 6201 * already-assigned Destination CID, then both the 6202 * original channel and the new channel shall be 
6203 * immediately discarded and not used. 6204 */ 6205 l2cap_chan_del(chan, ECONNREFUSED); 6206 l2cap_chan_unlock(chan); 6207 chan = __l2cap_get_chan_by_dcid(conn, dcid); 6208 l2cap_chan_lock(chan); 6209 l2cap_chan_del(chan, ECONNRESET); 6210 l2cap_chan_unlock(chan); 6211 continue; 6212 } 6213 6214 switch (result) { 6215 case L2CAP_CR_LE_AUTHENTICATION: 6216 case L2CAP_CR_LE_ENCRYPTION: 6217 /* If we already have MITM protection we can't do 6218 * anything. 6219 */ 6220 if (hcon->sec_level > BT_SECURITY_MEDIUM) { 6221 l2cap_chan_del(chan, ECONNREFUSED); 6222 break; 6223 } 6224 6225 sec_level = hcon->sec_level + 1; 6226 if (chan->sec_level < sec_level) 6227 chan->sec_level = sec_level; 6228 6229 /* We'll need to send a new Connect Request */ 6230 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags); 6231 6232 smp_conn_security(hcon, chan->sec_level); 6233 break; 6234 6235 case L2CAP_CR_LE_BAD_PSM: 6236 l2cap_chan_del(chan, ECONNREFUSED); 6237 break; 6238 6239 default: 6240 /* If dcid was not set it means channels was refused */ 6241 if (!dcid) { 6242 l2cap_chan_del(chan, ECONNREFUSED); 6243 break; 6244 } 6245 6246 chan->ident = 0; 6247 chan->dcid = dcid; 6248 chan->omtu = mtu; 6249 chan->remote_mps = mps; 6250 chan->tx_credits = credits; 6251 l2cap_chan_ready(chan); 6252 break; 6253 } 6254 6255 l2cap_chan_unlock(chan); 6256 } 6257 6258 mutex_unlock(&conn->chan_lock); 6259 6260 return err; 6261 } 6262 6263 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, 6264 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 6265 u8 *data) 6266 { 6267 struct l2cap_ecred_reconf_req *req = (void *) data; 6268 struct l2cap_ecred_reconf_rsp rsp; 6269 u16 mtu, mps, result; 6270 struct l2cap_chan *chan; 6271 int i, num_scid; 6272 6273 if (!enable_ecred) 6274 return -EINVAL; 6275 6276 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { 6277 result = L2CAP_CR_LE_INVALID_PARAMS; 6278 goto respond; 6279 } 6280 6281 mtu = __le16_to_cpu(req->mtu); 6282 mps = 
__le16_to_cpu(req->mps); 6283 6284 BT_DBG("mtu %u mps %u", mtu, mps); 6285 6286 if (mtu < L2CAP_ECRED_MIN_MTU) { 6287 result = L2CAP_RECONF_INVALID_MTU; 6288 goto respond; 6289 } 6290 6291 if (mps < L2CAP_ECRED_MIN_MPS) { 6292 result = L2CAP_RECONF_INVALID_MPS; 6293 goto respond; 6294 } 6295 6296 cmd_len -= sizeof(*req); 6297 num_scid = cmd_len / sizeof(u16); 6298 result = L2CAP_RECONF_SUCCESS; 6299 6300 for (i = 0; i < num_scid; i++) { 6301 u16 scid; 6302 6303 scid = __le16_to_cpu(req->scid[i]); 6304 if (!scid) 6305 return -EPROTO; 6306 6307 chan = __l2cap_get_chan_by_dcid(conn, scid); 6308 if (!chan) 6309 continue; 6310 6311 /* If the MTU value is decreased for any of the included 6312 * channels, then the receiver shall disconnect all 6313 * included channels. 6314 */ 6315 if (chan->omtu > mtu) { 6316 BT_ERR("chan %p decreased MTU %u -> %u", chan, 6317 chan->omtu, mtu); 6318 result = L2CAP_RECONF_INVALID_MTU; 6319 } 6320 6321 chan->omtu = mtu; 6322 chan->remote_mps = mps; 6323 } 6324 6325 respond: 6326 rsp.result = cpu_to_le16(result); 6327 6328 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), 6329 &rsp); 6330 6331 return 0; 6332 } 6333 6334 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, 6335 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 6336 u8 *data) 6337 { 6338 struct l2cap_chan *chan, *tmp; 6339 struct l2cap_ecred_conn_rsp *rsp = (void *) data; 6340 u16 result; 6341 6342 if (cmd_len < sizeof(*rsp)) 6343 return -EPROTO; 6344 6345 result = __le16_to_cpu(rsp->result); 6346 6347 BT_DBG("result 0x%4.4x", rsp->result); 6348 6349 if (!result) 6350 return 0; 6351 6352 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { 6353 if (chan->ident != cmd->ident) 6354 continue; 6355 6356 l2cap_chan_del(chan, ECONNRESET); 6357 } 6358 6359 return 0; 6360 } 6361 6362 static inline int l2cap_le_command_rej(struct l2cap_conn *conn, 6363 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 6364 u8 *data) 6365 { 6366 struct l2cap_cmd_rej_unk *rej = 
	    (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}

/* Dispatch a single LE signaling command to its handler.
 * Returns a negative error for unknown or malformed commands, which
 * makes the caller send an L2CAP_COMMAND_REJ back to the peer.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Process an skb received on the LE signaling channel.  An LE signaling
 * PDU carries exactly one command; a length mismatch or zero ident
 * drops the packet.  A handler error is answered with a command reject.
 * Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* Historical log text; err actually comes from the
		 * command handler above, not a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}

/* Process an skb received on the BR/EDR signaling channel, which may
 * carry several concatenated commands.  Each command is dispatched to
 * l2cap_bredr_sig_cmd(); errors are answered with a command reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Iterate over every command packed into this C-frame */
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}

/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The header has already been pulled, so the CRC is recomputed over
 * skb->data - hdr_size.  Returns 0 on match (or when FCS is disabled),
 * -EBADMSG on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still readable
		 * at skb->data + skb->len after the trim.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}

/* Acknowledge the peer with the F-bit set: send RNR when locally busy,
 * otherwise flush pending I-frames and fall back to an RR if no frame
 * carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}

/* Append @new_frag to @skb's frag_list, tracking the list tail in
 * *@last_frag so appends stay O(1), and keep skb's len/data_len/
 * truesize accounting consistent.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}

/* Reassemble an SDU from I-frames according to the SAR bits in
 * @control.  Ownership: on success the skb is either delivered to
 * chan->ops->recv() or kept in chan->sdu; on error both the skb and
 * any partial SDU are freed.  Returns 0 on success, -EINVAL on a SAR
 * sequence violation, -EMSGSIZE when the SDU exceeds the local MTU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a violation */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment carrying the full SDU is invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop this frame and any partially reassembled SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}

static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

/* Feed a local-busy transition into the ERTM TX state machine.
 * No-op for non-ERTM channels.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}

/* Drain the SREJ queue in order: deliver consecutively numbered frames
 * starting at buffer_seq until a gap (or local busy) stops us.  When
 * the queue empties, leave SREJ_SENT state and ack the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}

/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, disconnecting instead when the request is invalid or the
 * retry limit is exceeded.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next unused seq can't refer to a sent frame */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit answers
			 * the SREJ we already acted on
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}

/* Handle a received REJ S-frame: retransmit everything from reqseq,
 * disconnecting on an invalid reqseq or when the retry limit is hit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if
	    (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}

/* Classify a received I-frame's txseq relative to the RX window:
 * expected, duplicate, unexpected (gap -> SREJ), or invalid, with
 * SREJ_SENT-specific variants.  The "invalid vs. ignore" split depends
 * on the TX window size, as explained in the comments below.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}

/* ERTM RX state machine: RECV state.  Handles I-frames (in-order
 * delivery, or switching to SREJ_SENT on a gap) and RR/RNR/REJ/SREJ
 * S-frames.  Frames not stored anywhere (skb_in_use stays false) are
 * freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; still update the TX side */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

/* ERTM RX state machine: SREJ_SENT state — we are collecting
 * retransmissions to fill a sequence gap.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl
				    *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* This may fill the gap: deliver what is now
			 * in-order (may also leave SREJ_SENT state).
			 */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			/* Answer the poll while re-requesting the gap */
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

/* Finish an AMP channel move: back to RECV state, adopt the MTU of the
 * link now carrying the channel, and resegment pending data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
BT_DBG("chan %p", chan); 7226 7227 chan->rx_state = L2CAP_RX_STATE_RECV; 7228 7229 if (chan->hs_hcon) 7230 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; 7231 else 7232 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; 7233 7234 return l2cap_resegment(chan); 7235 } 7236 7237 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, 7238 struct l2cap_ctrl *control, 7239 struct sk_buff *skb, u8 event) 7240 { 7241 int err; 7242 7243 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, 7244 event); 7245 7246 if (!control->poll) 7247 return -EPROTO; 7248 7249 l2cap_process_reqseq(chan, control->reqseq); 7250 7251 if (!skb_queue_empty(&chan->tx_q)) 7252 chan->tx_send_head = skb_peek(&chan->tx_q); 7253 else 7254 chan->tx_send_head = NULL; 7255 7256 /* Rewind next_tx_seq to the point expected 7257 * by the receiver. 7258 */ 7259 chan->next_tx_seq = control->reqseq; 7260 chan->unacked_frames = 0; 7261 7262 err = l2cap_finish_move(chan); 7263 if (err) 7264 return err; 7265 7266 set_bit(CONN_SEND_FBIT, &chan->conn_state); 7267 l2cap_send_i_or_rr_or_rnr(chan); 7268 7269 if (event == L2CAP_EV_RECV_IFRAME) 7270 return -EPROTO; 7271 7272 return l2cap_rx_state_recv(chan, control, NULL, event); 7273 } 7274 7275 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, 7276 struct l2cap_ctrl *control, 7277 struct sk_buff *skb, u8 event) 7278 { 7279 int err; 7280 7281 if (!control->final) 7282 return -EPROTO; 7283 7284 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 7285 7286 chan->rx_state = L2CAP_RX_STATE_RECV; 7287 l2cap_process_reqseq(chan, control->reqseq); 7288 7289 if (!skb_queue_empty(&chan->tx_q)) 7290 chan->tx_send_head = skb_peek(&chan->tx_q); 7291 else 7292 chan->tx_send_head = NULL; 7293 7294 /* Rewind next_tx_seq to the point expected 7295 * by the receiver. 
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}

static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}

/* Dispatch a received ERTM event to the handler for the current RX state.
 * A reqseq that does not acknowledge a sent-but-unacked frame is a
 * protocol violation and tears the channel down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		/* NOTE(review): the format string below is missing a
		 * closing ')' after expected_ack_seq.
		 */
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}

/* Streaming-mode receive path: only the expected txseq is reassembled;
 * anything else discards the partial SDU and the frame. Always returns 0
 * because streaming mode has no recovery.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the
	 * txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: drop any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}

/* Entry point for one ERTM/streaming-mode PDU: verifies FCS and payload
 * size, then routes I-frames into the RX state machine (or the streaming
 * path) and S-frames to the matching supervisory event. Consumes skb on
 * every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* len is reduced to the SDU payload size for the MPS check below */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Indexed by the 2-bit 'super' field of the S-frame */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Replenish the peer's LE credits. The target level is the number of
 * MPS-sized PDUs needed to carry one full SDU (imtu/mps + 1); credits are
 * only sent when the current rx_credits count has fallen below that.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = (chan->imtu / chan->mps) + 1;

	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}

/* Deliver a complete reassembled SDU to the channel owner, then top up the
 * peer's credits. Ownership of skb passes to chan->ops->recv.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}

/* Receive one LE/enhanced credit-based PDU: account a credit, then either
 * start a new SDU (first PDU carries the SDU length) or append a fragment.
 * Always returns 0 once the skb has been consumed internally (see the
 * comment before the final return); the early -ENOBUFS paths leave skb to
 * the caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: leading 16-bit field is the SDU len */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Single-PDU SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally.
	 * An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}

/* Route one data PDU to the channel with the given source CID. The channel
 * lookup presumably returns the channel locked and with a reference held
 * (the A2MP branch takes hold+lock explicitly to match), which the
 * unlock/put at 'done' releases — TODO confirm against
 * l2cap_get_chan_by_scid.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism.
		 */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Deliver a connectionless (G-frame) PDU to a matching PSM listener on a
 * BR/EDR link; the remote address and PSM are stashed in the skb cb for
 * msg_name. Frees skb on every failure path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}

/* Parse one complete L2CAP frame and dispatch it by CID. Frames arriving
 * before the HCI link is fully connected are parked on pending_rx and
 * replayed later by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh still points at the header: skb_pull only advances skb->data */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively
	 * block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* Work item: replay frames that were queued before the link came up */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}

/* Create (or return the existing) L2CAP connection object for an HCI
 * connection: allocates the conn, ties it to a new hci_chan, picks the MTU
 * from the link type, and initializes locks, lists, timers and work items.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR
				 | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}

/* Validate a PSM for the destination address type: LE uses the 8-bit SPSM
 * space; BR/EDR PSMs follow the core-spec bit pattern below.
 */
static bool is_valid_psm(u16 psm, u8 dst_type)
{
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}

/* Iteration context for l2cap_chan_by_pid() below */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the count */
	struct pid *pid;		/* owner PID to match */
	int count;			/* matching deferred channels */
};

/* l2cap_chan_list() callback: count deferred EXT_FLOWCTL channels that
 * belong to the same PID and PSM as the connecting channel.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}

/* Initiate an outgoing L2CAP channel to dst: validates PSM/CID/mode/state,
 * creates or reuses the underlying HCI connection, then attaches the
 * channel and starts the connect procedure. Returns 0 on success or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct
	hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or disabled by module param */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly; otherwise go
		 * through the background-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);

/* Send an ECRED reconfigure request advertising the channel's current
 * imtu/mps for its single scid.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}

/* Raise the channel's receive MTU and notify the peer; the MTU can only
 * grow (shrinking is rejected with -EINVAL).
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}

/* ---- L2CAP interface with lower layer (HCI) ---- */

/* Incoming ACL connection indication: scan listening channels for an exact
 * source-address match (preferred) or a wildcard match, and report the
 * accumulated accept/role-switch link-mode flags.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	/* Exact address matches take precedence over wildcard listeners */
	return exact ? lm1 : lm2;
}

/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Returned channel carries a reference the caller must put */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}

/* HCI connect-complete callback: tear the connection down on failure, or
 * create the l2cap_conn and spawn channels for all matching fixed-channel
 * listeners.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed
	 * channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}

/* HCI disconnect indication: report the stored disconnect reason for this
 * connection, if any.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

/* HCI disconnect-complete callback: tear down the L2CAP connection */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

/* React to an encryption change on a connection-oriented channel: loss of
 * encryption either arms a grace timer (MEDIUM security) or closes the
 * channel outright (HIGH/FIPS); regained encryption cancels the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

/* HCI security (authentication/encryption) event: walk every channel on the
 * connection and advance those whose connect or accept procedure was
 * waiting on the security result.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid =
cpu_to_le16(chan->scid); 8389 rsp.result = cpu_to_le16(res); 8390 rsp.status = cpu_to_le16(stat); 8391 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 8392 sizeof(rsp), &rsp); 8393 8394 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && 8395 res == L2CAP_CR_SUCCESS) { 8396 char buf[128]; 8397 set_bit(CONF_REQ_SENT, &chan->conf_state); 8398 l2cap_send_cmd(conn, l2cap_get_ident(conn), 8399 L2CAP_CONF_REQ, 8400 l2cap_build_conf_req(chan, buf, sizeof(buf)), 8401 buf); 8402 chan->num_conf_req++; 8403 } 8404 } 8405 8406 l2cap_chan_unlock(chan); 8407 } 8408 8409 mutex_unlock(&conn->chan_lock); 8410 } 8411 8412 /* Append fragment into frame respecting the maximum len of rx_skb */ 8413 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, 8414 u16 len) 8415 { 8416 if (!conn->rx_skb) { 8417 /* Allocate skb for the complete frame (with header) */ 8418 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); 8419 if (!conn->rx_skb) 8420 return -ENOMEM; 8421 /* Init rx_len */ 8422 conn->rx_len = len; 8423 } 8424 8425 /* Copy as much as the rx_skb can hold */ 8426 len = min_t(u16, len, skb->len); 8427 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len); 8428 skb_pull(skb, len); 8429 conn->rx_len -= len; 8430 8431 return len; 8432 } 8433 8434 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) 8435 { 8436 struct sk_buff *rx_skb; 8437 int len; 8438 8439 /* Append just enough to complete the header */ 8440 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); 8441 8442 /* If header could not be read just continue */ 8443 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) 8444 return len; 8445 8446 rx_skb = conn->rx_skb; 8447 len = get_unaligned_le16(rx_skb->data); 8448 8449 /* Check if rx_skb has enough space to received all fragments */ 8450 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) { 8451 /* Update expected len */ 8452 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); 8453 return 
L2CAP_LEN_SIZE; 8454 } 8455 8456 /* Reset conn->rx_skb since it will need to be reallocated in order to 8457 * fit all fragments. 8458 */ 8459 conn->rx_skb = NULL; 8460 8461 /* Reallocates rx_skb using the exact expected length */ 8462 len = l2cap_recv_frag(conn, rx_skb, 8463 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE)); 8464 kfree_skb(rx_skb); 8465 8466 return len; 8467 } 8468 8469 static void l2cap_recv_reset(struct l2cap_conn *conn) 8470 { 8471 kfree_skb(conn->rx_skb); 8472 conn->rx_skb = NULL; 8473 conn->rx_len = 0; 8474 } 8475 8476 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) 8477 { 8478 struct l2cap_conn *conn = hcon->l2cap_data; 8479 int len; 8480 8481 /* For AMP controller do not create l2cap conn */ 8482 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY) 8483 goto drop; 8484 8485 if (!conn) 8486 conn = l2cap_conn_add(hcon); 8487 8488 if (!conn) 8489 goto drop; 8490 8491 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); 8492 8493 switch (flags) { 8494 case ACL_START: 8495 case ACL_START_NO_FLUSH: 8496 case ACL_COMPLETE: 8497 if (conn->rx_skb) { 8498 BT_ERR("Unexpected start frame (len %d)", skb->len); 8499 l2cap_recv_reset(conn); 8500 l2cap_conn_unreliable(conn, ECOMM); 8501 } 8502 8503 /* Start fragment may not contain the L2CAP length so just 8504 * copy the initial byte when that happens and use conn->mtu as 8505 * expected length. 
8506 */ 8507 if (skb->len < L2CAP_LEN_SIZE) { 8508 l2cap_recv_frag(conn, skb, conn->mtu); 8509 break; 8510 } 8511 8512 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; 8513 8514 if (len == skb->len) { 8515 /* Complete frame received */ 8516 l2cap_recv_frame(conn, skb); 8517 return; 8518 } 8519 8520 BT_DBG("Start: total len %d, frag len %u", len, skb->len); 8521 8522 if (skb->len > len) { 8523 BT_ERR("Frame is too long (len %u, expected len %d)", 8524 skb->len, len); 8525 l2cap_conn_unreliable(conn, ECOMM); 8526 goto drop; 8527 } 8528 8529 /* Append fragment into frame (with header) */ 8530 if (l2cap_recv_frag(conn, skb, len) < 0) 8531 goto drop; 8532 8533 break; 8534 8535 case ACL_CONT: 8536 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len); 8537 8538 if (!conn->rx_skb) { 8539 BT_ERR("Unexpected continuation frame (len %d)", skb->len); 8540 l2cap_conn_unreliable(conn, ECOMM); 8541 goto drop; 8542 } 8543 8544 /* Complete the L2CAP length if it has not been read */ 8545 if (conn->rx_skb->len < L2CAP_LEN_SIZE) { 8546 if (l2cap_recv_len(conn, skb) < 0) { 8547 l2cap_conn_unreliable(conn, ECOMM); 8548 goto drop; 8549 } 8550 8551 /* Header still could not be read just continue */ 8552 if (conn->rx_skb->len < L2CAP_LEN_SIZE) 8553 break; 8554 } 8555 8556 if (skb->len > conn->rx_len) { 8557 BT_ERR("Fragment is too long (len %u, expected %u)", 8558 skb->len, conn->rx_len); 8559 l2cap_recv_reset(conn); 8560 l2cap_conn_unreliable(conn, ECOMM); 8561 goto drop; 8562 } 8563 8564 /* Append fragment into frame (with header) */ 8565 l2cap_recv_frag(conn, skb, skb->len); 8566 8567 if (!conn->rx_len) { 8568 /* Complete frame received. l2cap_recv_frame 8569 * takes ownership of the skb so set the global 8570 * rx_skb pointer to NULL first. 
8571 */ 8572 struct sk_buff *rx_skb = conn->rx_skb; 8573 conn->rx_skb = NULL; 8574 l2cap_recv_frame(conn, rx_skb); 8575 } 8576 break; 8577 } 8578 8579 drop: 8580 kfree_skb(skb); 8581 } 8582 8583 static struct hci_cb l2cap_cb = { 8584 .name = "L2CAP", 8585 .connect_cfm = l2cap_connect_cfm, 8586 .disconn_cfm = l2cap_disconn_cfm, 8587 .security_cfm = l2cap_security_cfm, 8588 }; 8589 8590 static int l2cap_debugfs_show(struct seq_file *f, void *p) 8591 { 8592 struct l2cap_chan *c; 8593 8594 read_lock(&chan_list_lock); 8595 8596 list_for_each_entry(c, &chan_list, global_l) { 8597 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 8598 &c->src, c->src_type, &c->dst, c->dst_type, 8599 c->state, __le16_to_cpu(c->psm), 8600 c->scid, c->dcid, c->imtu, c->omtu, 8601 c->sec_level, c->mode); 8602 } 8603 8604 read_unlock(&chan_list_lock); 8605 8606 return 0; 8607 } 8608 8609 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); 8610 8611 static struct dentry *l2cap_debugfs; 8612 8613 int __init l2cap_init(void) 8614 { 8615 int err; 8616 8617 err = l2cap_init_sockets(); 8618 if (err < 0) 8619 return err; 8620 8621 hci_register_cb(&l2cap_cb); 8622 8623 if (IS_ERR_OR_NULL(bt_debugfs)) 8624 return 0; 8625 8626 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, 8627 NULL, &l2cap_debugfs_fops); 8628 8629 return 0; 8630 } 8631 8632 void l2cap_exit(void) 8633 { 8634 debugfs_remove(l2cap_debugfs); 8635 hci_unregister_cb(&l2cap_cb); 8636 l2cap_cleanup_sockets(); 8637 } 8638 8639 module_param(disable_ertm, bool, 0644); 8640 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 8641 8642 module_param(enable_ecred, bool, 0644); 8643 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode"); 8644