1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 5 Copyright (C) 2010 Google Inc. 6 Copyright (C) 2011 ProFUSION Embedded Systems 7 Copyright (c) 2012 Code Aurora Forum. All rights reserved. 8 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 10 11 This program is free software; you can redistribute it and/or modify 12 it under the terms of the GNU General Public License version 2 as 13 published by the Free Software Foundation; 14 15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 23 24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 26 SOFTWARE IS DISCLAIMED. 27 */ 28 29 /* Bluetooth L2CAP core. 
 */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/filter.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of all L2CAP channels; guarded by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);

/* Map an HCI link type plus HCI address type onto the BDADDR_* socket
 * address type exposed to userspace.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* Socket address type of the local (source) side of hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

/* Socket address type of the remote (destination) side of hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}

/* ---- L2CAP channels ---- */

/* Look up a channel on this connection by destination CID.
 * NOTE(review): takes no lock and no reference; callers appear to be
 * responsible for holding conn->chan_lock (see l2cap_get_chan_by_dcid).
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

/* Look up a channel on this connection by source CID.
 * NOTE(review): lockless, no reference taken — callers appear to be
 * responsible for holding conn->chan_lock (see the wrapper below).
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Look up a channel by the signalling command identifier it used.
 * NOTE(review): lockless like the other __ lookups — caller presumably
 * holds conn->chan_lock; confirm against callers.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

/* Find a global channel bound to the given PSM and source address,
 * skipping channels whose transport (BR/EDR vs LE) does not match
 * src_type. Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan
		  *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* Explicit PSM requested: fail if it is already bound on src */
	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		/* No PSM given: auto-allocate from the dynamic range.
		 * BR/EDR PSMs must have an odd least-significant octet,
		 * hence the increment of 2; LE PSMs are a flat range.
		 */
		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

/* Bind a fixed channel to a specific source CID */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

/* Pick the first free dynamic CID on this connection, or 0 if the
 * dynamic range is exhausted.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

/* Move chan to a new state and notify its owner (err == 0) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

/* Like l2cap_state_change() but also reports an error to the owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

/* Report an error to the owner without changing channel state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

/* Arm the ERTM retransmission timer unless the monitor timer is already
 * pending (the monitor timer supersedes it) or no timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

/* Switch from the retransmission timer to the monitor timer */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

/* Linear scan of a queue for the skb carrying ERTM tx sequence seq */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — callers must check,
 * otherwise head is L2CAP_SEQ_LIST_CLEAR and a stale slot is cleared.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

/* Empty the list; O(n) over the backing array, skipped when already empty */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do (membership is set-like) */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

/* Deferred work run when a channel's chan_timer expires: close the
 * channel with a state-dependent error and drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}

/* Allocate a new channel, link it on the global list and initialise
 * its timers and refcount. Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref
				       *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	/* Final put: unlink from the global list and free */
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

/* Take a reference; caller must already hold one */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}

/* Take a reference only if the channel is not already being destroyed.
 * Returns c on success, NULL if the refcount had dropped to zero.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}

/* Drop a reference; frees the channel on the last put */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

/* Reset a channel to the default ERTM/security parameters used before
 * configuration negotiates anything else.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

/* Initialise LE credit-based flow control state on chan with the given
 * number of transmit credits from the peer.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits =
			   (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}

/* Enhanced-credit-based (ECRED) variant of the LE flow control init:
 * additionally enforces the spec-mandated minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}

/* Attach chan to conn: assign CIDs/MTU per channel type, set QoS
 * defaults and link it on the connection's channel list.
 * NOTE(review): "__" prefix and locked wrapper below suggest the caller
 * must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list,
		 &conn->chan_l);
}

/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}

/* Detach chan from its connection, release the references taken in
 * __l2cap_chan_add() and purge any mode-specific queued state.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific to clean up before config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

/* Invoke func on every channel of conn whose signalling ident matches
 * id; _safe iteration allows func to remove the channel.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}

/* Invoke func on every channel of conn; caller holds conn->chan_lock
 * (see the l2cap_chan_list() wrapper).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct
	       l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}

/* Locked iteration over all channels of conn; tolerates conn == NULL */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->chan_lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);

/* Deferred work: propagate the hci_conn's (possibly resolved) identity
 * address to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* Reject a pending LE credit-based connection request and move the
 * channel to BT_DISCONN. The result depends on whether the request was
 * deferred for authorization or simply had no matching PSM.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

/* Reject a pending enhanced-credit-based connection; the deferred
 * response helper builds and sends the actual PDU.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}

/* Reject a pending BR/EDR connection request on chan */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result =
			 L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

/* Close chan according to its current state: tear down listeners,
 * send a disconnect request for established connection-oriented
 * channels, or reject a not-yet-accepted incoming connection.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ecred reject defers the rsp; skip
					 * the chan_del below
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

/* Map the channel type and security level onto the HCI authentication
 * requirement used when securing the underlying ACL link.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if
		   (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			/* 3DSP traffic never needs more than SDP security */
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			/* SDP itself gets a dedicated low-security level */
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links use SMP rather than HCI authentication */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	/* Wrap within the kernel-owned 1..128 range */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}

/* Build and transmit a signalling command PDU on conn's signalling
 * channel; silently drops the command if allocation fails.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	/* Signalling traffic jumps ahead of data */
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

/* Transmit a data skb on chan's HCI channel with the appropriate ACL
 * flush/start flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

/* Decode a 16-bit ERTM enhanced control field into l2cap_ctrl */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

/* Decode a 32-bit ERTM extended control field into l2cap_ctrl */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void
		  __unpack_control(struct l2cap_chan *chan,
				   struct sk_buff *skb)
{
	/* Consume the (enhanced or extended) control field from the skb
	 * head and decode it into the skb's control block.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

/* Encode l2cap_ctrl into a 32-bit extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

/* Encode l2cap_ctrl into a 16-bit enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

/* Write the control field into an skb directly after the L2CAP header */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int
			   __ertm_hdr_size(struct l2cap_chan *chan)
{
	/* ERTM header size depends on whether extended control is in use */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

/* Allocate and fill a complete S-frame PDU (header, control field and
 * optional FCS) for chan. Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

/* Update ERTM bookkeeping (F-bit, RNR state, last acked sequence) and
 * transmit an S-frame built from control.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

/* Send an RR (or RNR when locally busy) S-frame acknowledging up to
 * chan->buffer_seq, optionally with the poll bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

/* True when no connection request is outstanding for chan */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

/* Send an L2CAP connection request for chan's PSM/SCID */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
1211 */ 1212 if (chan->state == BT_CONNECTED) 1213 return; 1214 1215 /* This clears all conf flags, including CONF_NOT_COMPLETE */ 1216 chan->conf_state = 0; 1217 __clear_chan_timer(chan); 1218 1219 switch (chan->mode) { 1220 case L2CAP_MODE_LE_FLOWCTL: 1221 case L2CAP_MODE_EXT_FLOWCTL: 1222 if (!chan->tx_credits) 1223 chan->ops->suspend(chan); 1224 break; 1225 } 1226 1227 chan->state = BT_CONNECTED; 1228 1229 chan->ops->ready(chan); 1230 } 1231 1232 static void l2cap_le_connect(struct l2cap_chan *chan) 1233 { 1234 struct l2cap_conn *conn = chan->conn; 1235 struct l2cap_le_conn_req req; 1236 1237 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) 1238 return; 1239 1240 if (!chan->imtu) 1241 chan->imtu = chan->conn->mtu; 1242 1243 l2cap_le_flowctl_init(chan, 0); 1244 1245 memset(&req, 0, sizeof(req)); 1246 req.psm = chan->psm; 1247 req.scid = cpu_to_le16(chan->scid); 1248 req.mtu = cpu_to_le16(chan->imtu); 1249 req.mps = cpu_to_le16(chan->mps); 1250 req.credits = cpu_to_le16(chan->rx_credits); 1251 1252 chan->ident = l2cap_get_ident(conn); 1253 1254 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, 1255 sizeof(req), &req); 1256 } 1257 1258 struct l2cap_ecred_conn_data { 1259 struct { 1260 struct l2cap_ecred_conn_req req; 1261 __le16 scid[5]; 1262 } __packed pdu; 1263 struct l2cap_chan *chan; 1264 struct pid *pid; 1265 int count; 1266 }; 1267 1268 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) 1269 { 1270 struct l2cap_ecred_conn_data *conn = data; 1271 struct pid *pid; 1272 1273 if (chan == conn->chan) 1274 return; 1275 1276 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) 1277 return; 1278 1279 pid = chan->ops->get_peer_pid(chan); 1280 1281 /* Only add deferred channels with the same PID/PSM */ 1282 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || 1283 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) 1284 return; 1285 1286 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, 
			     &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}

/* Send an Enhanced Credit Based Connection Request, batching any other
 * deferred channels from the same owner into the same PDU.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Command length covers the fixed request plus one SCID per
	 * channel batched above.
	 */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}

/* Kick off connection establishment for an LE channel once security
 * requirements are met; fixed channels (no PSM) become ready directly.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}

/* Transport-specific connection start: LE or BR/EDR */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

/* Request the remote feature mask; done once per connection before any
 * BR/EDR channel setup proceeds (answered or timed out via info_timer).
 */
static void l2cap_request_info(struct
			       l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}

static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}

/* Drive channel setup as far as the current connection state allows:
 * feature-mask exchange first, then security, then the connect request
 * itself (or a disconnect timer when the key size check fails).
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}

static inline int
l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	/* A mode is usable only if both the remote feature mask and our
	 * own mask (modulo the disable_ertm module parameter) carry it.
	 */
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

/* Send a Disconnection Request and move the channel to BT_DISCONN,
 * recording err as the reason reported to the channel owner.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers - no more frames will be exchanged */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}

/* ---- L2CAP connections ---- */
/* Walk all channels on a connection and advance those waiting to
 * connect (BT_CONNECT) or to answer an incoming request (BT_CONNECT2).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close state-2 devices whose configured mode the
			 * remote does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* LE link became ready: trigger any pending security procedure and, as
 * peripheral, request a connection parameter update when the current
 * interval falls outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}

/* The underlying link is up: start or ready every channel and release
 * any received frames that were queued while the link was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}

/* Info request timed out: treat the feature exchange as done so channel
 * setup can proceed with default features.
 */
static void l2cap_info_timeout(struct work_struct
			       *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}

/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);

/* Remove every registered l2cap_user; used while tearing the
 * connection down.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}

/* Tear down an L2CAP connection: stop deferred work, close every
 * channel with the given error, and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
1740 */ 1741 if (work_pending(&conn->pending_rx_work)) 1742 cancel_work_sync(&conn->pending_rx_work); 1743 1744 cancel_delayed_work_sync(&conn->id_addr_timer); 1745 1746 l2cap_unregister_all_users(conn); 1747 1748 /* Force the connection to be immediately dropped */ 1749 hcon->disc_timeout = 0; 1750 1751 mutex_lock(&conn->chan_lock); 1752 1753 /* Kill channels */ 1754 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1755 l2cap_chan_hold(chan); 1756 l2cap_chan_lock(chan); 1757 1758 l2cap_chan_del(chan, err); 1759 1760 chan->ops->close(chan); 1761 1762 l2cap_chan_unlock(chan); 1763 l2cap_chan_put(chan); 1764 } 1765 1766 mutex_unlock(&conn->chan_lock); 1767 1768 hci_chan_del(conn->hchan); 1769 1770 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1771 cancel_delayed_work_sync(&conn->info_timer); 1772 1773 hcon->l2cap_data = NULL; 1774 conn->hchan = NULL; 1775 l2cap_conn_put(conn); 1776 } 1777 1778 static void l2cap_conn_free(struct kref *ref) 1779 { 1780 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); 1781 1782 hci_conn_put(conn->hcon); 1783 kfree(conn); 1784 } 1785 1786 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) 1787 { 1788 kref_get(&conn->ref); 1789 return conn; 1790 } 1791 EXPORT_SYMBOL(l2cap_conn_get); 1792 1793 void l2cap_conn_put(struct l2cap_conn *conn) 1794 { 1795 kref_put(&conn->ref, l2cap_conn_free); 1796 } 1797 EXPORT_SYMBOL(l2cap_conn_put); 1798 1799 /* ---- Socket interface ---- */ 1800 1801 /* Find socket with psm and source / destination bdaddr. 1802 * Returns closest match. 
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Returned channel carries a reference (may be NULL if it raced
	 * to zero refs).
	 */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}

/* ERTM monitor timer expired: feed the event into the TX state machine */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* ERTM retransmission timer expired: feed the event into the TX state
 * machine.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if
	    (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Transmit every queued I-frame on a streaming-mode channel; frames are
 * sent immediately and never retransmitted.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}

/* Send as many new I-frames as the remote TX window allows. Returns the
 * number of frames sent, 0 when blocked, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}

/* Retransmit every sequence number currently on the retransmit list,
 * enforcing the channel's max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final =
				0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

/* Queue a single sequence number for retransmission and send it */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

/* Retransmit all unacknowledged I-frames starting at control->reqseq */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the remote has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

/* Acknowledge received I-frames: with an RNR when locally busy, by
 * piggybacking on freshly sent I-frames, or with an explicit RR once
 * the ack backlog reaches 3/4 of the window.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still frames left to ack - ack them on timer expiry */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}

/* Copy len bytes of user data into skb, allocating continuation
 * fragments as needed. Returns bytes consumed or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

/* Build a connectionless (G-frame) PDU: basic header plus the PSM */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build a basic-mode (B-frame) PDU: basic L2CAP header only */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build an ERTM/streaming I-frame; a non-zero sdulen marks a SAR start
 * fragment and adds the SDU length field after the control field.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb =
	      chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}

/* Split an outgoing SDU into I-frame PDUs queued on seg_queue,
 * applying SAR (unsegmented/start/continue/end) markings.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead.
	 */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (START) fragment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}

/* Build an LE flow-control (K-frame) PDU; a non-zero sdulen adds the
 * SDU length field used on the first fragment of a segmented SDU.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static
int l2cap_segment_le_sdu(struct l2cap_chan *chan,
			 struct sk_buff_head *seg_queue,
			 struct msghdr *msg, size_t len)
{
	/* Segment an outgoing SDU into LE credit-based flow control PDUs
	 * and queue them on seg_queue.  The first PDU carries the 2-byte
	 * SDU length header, so its payload budget is remote_mps minus
	 * L2CAP_SDULEN_SIZE; subsequent PDUs get the full remote_mps.
	 * Returns 0 on success or the PTR_ERR of a failed PDU allocation
	 * (seg_queue is purged on failure).
	 */
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Only the first PDU carries the SDU length; after it is
		 * emitted, reclaim those bytes for the remaining PDUs.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}

/* Transmit PDUs queued on tx_q while the peer has granted credits;
 * one credit is consumed per PDU sent.
 */
static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
{
	int sent = 0;

	BT_DBG("chan %p", chan);

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
		sent++;
	}

	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
	       skb_queue_len(&chan->tx_q));
}

/* Send an SDU on a channel, dispatching on channel type and mode.
 * Returns the number of bytes accepted for transmission or a negative
 * errno (-ENOTCONN, -EMSGSIZE, -EBADFD, or an allocation/copy error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting */
		if (chan->state != BT_CONNECTED) {
__skb_queue_purge(&seg_queue); 2523 err = -ENOTCONN; 2524 } 2525 2526 if (err) 2527 return err; 2528 2529 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); 2530 2531 l2cap_le_flowctl_send(chan); 2532 2533 if (!chan->tx_credits) 2534 chan->ops->suspend(chan); 2535 2536 err = len; 2537 2538 break; 2539 2540 case L2CAP_MODE_BASIC: 2541 /* Check outgoing MTU */ 2542 if (len > chan->omtu) 2543 return -EMSGSIZE; 2544 2545 /* Create a basic PDU */ 2546 skb = l2cap_create_basic_pdu(chan, msg, len); 2547 if (IS_ERR(skb)) 2548 return PTR_ERR(skb); 2549 2550 l2cap_do_send(chan, skb); 2551 err = len; 2552 break; 2553 2554 case L2CAP_MODE_ERTM: 2555 case L2CAP_MODE_STREAMING: 2556 /* Check outgoing MTU */ 2557 if (len > chan->omtu) { 2558 err = -EMSGSIZE; 2559 break; 2560 } 2561 2562 __skb_queue_head_init(&seg_queue); 2563 2564 /* Do segmentation before calling in to the state machine, 2565 * since it's possible to block while waiting for memory 2566 * allocation. 2567 */ 2568 err = l2cap_segment_sdu(chan, &seg_queue, msg, len); 2569 2570 if (err) 2571 break; 2572 2573 if (chan->mode == L2CAP_MODE_ERTM) 2574 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); 2575 else 2576 l2cap_streaming_send(chan, &seg_queue); 2577 2578 err = len; 2579 2580 /* If the skbs were not queued for sending, they'll still be in 2581 * seg_queue and need to be purged. 
2582 */ 2583 __skb_queue_purge(&seg_queue); 2584 break; 2585 2586 default: 2587 BT_DBG("bad state %1.1x", chan->mode); 2588 err = -EBADFD; 2589 } 2590 2591 return err; 2592 } 2593 EXPORT_SYMBOL_GPL(l2cap_chan_send); 2594 2595 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) 2596 { 2597 struct l2cap_ctrl control; 2598 u16 seq; 2599 2600 BT_DBG("chan %p, txseq %u", chan, txseq); 2601 2602 memset(&control, 0, sizeof(control)); 2603 control.sframe = 1; 2604 control.super = L2CAP_SUPER_SREJ; 2605 2606 for (seq = chan->expected_tx_seq; seq != txseq; 2607 seq = __next_seq(chan, seq)) { 2608 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { 2609 control.reqseq = seq; 2610 l2cap_send_sframe(chan, &control); 2611 l2cap_seq_list_append(&chan->srej_list, seq); 2612 } 2613 } 2614 2615 chan->expected_tx_seq = __next_seq(chan, txseq); 2616 } 2617 2618 static void l2cap_send_srej_tail(struct l2cap_chan *chan) 2619 { 2620 struct l2cap_ctrl control; 2621 2622 BT_DBG("chan %p", chan); 2623 2624 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) 2625 return; 2626 2627 memset(&control, 0, sizeof(control)); 2628 control.sframe = 1; 2629 control.super = L2CAP_SUPER_SREJ; 2630 control.reqseq = chan->srej_list.tail; 2631 l2cap_send_sframe(chan, &control); 2632 } 2633 2634 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) 2635 { 2636 struct l2cap_ctrl control; 2637 u16 initial_head; 2638 u16 seq; 2639 2640 BT_DBG("chan %p, txseq %u", chan, txseq); 2641 2642 memset(&control, 0, sizeof(control)); 2643 control.sframe = 1; 2644 control.super = L2CAP_SUPER_SREJ; 2645 2646 /* Capture initial list head to allow only one pass through the list. 
*/ 2647 initial_head = chan->srej_list.head; 2648 2649 do { 2650 seq = l2cap_seq_list_pop(&chan->srej_list); 2651 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) 2652 break; 2653 2654 control.reqseq = seq; 2655 l2cap_send_sframe(chan, &control); 2656 l2cap_seq_list_append(&chan->srej_list, seq); 2657 } while (chan->srej_list.head != initial_head); 2658 } 2659 2660 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) 2661 { 2662 struct sk_buff *acked_skb; 2663 u16 ackseq; 2664 2665 BT_DBG("chan %p, reqseq %u", chan, reqseq); 2666 2667 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) 2668 return; 2669 2670 BT_DBG("expected_ack_seq %u, unacked_frames %u", 2671 chan->expected_ack_seq, chan->unacked_frames); 2672 2673 for (ackseq = chan->expected_ack_seq; ackseq != reqseq; 2674 ackseq = __next_seq(chan, ackseq)) { 2675 2676 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); 2677 if (acked_skb) { 2678 skb_unlink(acked_skb, &chan->tx_q); 2679 kfree_skb(acked_skb); 2680 chan->unacked_frames--; 2681 } 2682 } 2683 2684 chan->expected_ack_seq = reqseq; 2685 2686 if (chan->unacked_frames == 0) 2687 __clear_retrans_timer(chan); 2688 2689 BT_DBG("unacked_frames %u", chan->unacked_frames); 2690 } 2691 2692 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) 2693 { 2694 BT_DBG("chan %p", chan); 2695 2696 chan->expected_tx_seq = chan->buffer_seq; 2697 l2cap_seq_list_clear(&chan->srej_list); 2698 skb_queue_purge(&chan->srej_q); 2699 chan->rx_state = L2CAP_RX_STATE_RECV; 2700 } 2701 2702 static void l2cap_tx_state_xmit(struct l2cap_chan *chan, 2703 struct l2cap_ctrl *control, 2704 struct sk_buff_head *skbs, u8 event) 2705 { 2706 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, 2707 event); 2708 2709 switch (event) { 2710 case L2CAP_EV_DATA_REQUEST: 2711 if (chan->tx_send_head == NULL) 2712 chan->tx_send_head = skb_peek(skbs); 2713 2714 skb_queue_splice_tail_init(skbs, &chan->tx_q); 2715 l2cap_ertm_send(chan); 
2716 break; 2717 case L2CAP_EV_LOCAL_BUSY_DETECTED: 2718 BT_DBG("Enter LOCAL_BUSY"); 2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2720 2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 2722 /* The SREJ_SENT state must be aborted if we are to 2723 * enter the LOCAL_BUSY state. 2724 */ 2725 l2cap_abort_rx_srej_sent(chan); 2726 } 2727 2728 l2cap_send_ack(chan); 2729 2730 break; 2731 case L2CAP_EV_LOCAL_BUSY_CLEAR: 2732 BT_DBG("Exit LOCAL_BUSY"); 2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2734 2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { 2736 struct l2cap_ctrl local_control; 2737 2738 memset(&local_control, 0, sizeof(local_control)); 2739 local_control.sframe = 1; 2740 local_control.super = L2CAP_SUPER_RR; 2741 local_control.poll = 1; 2742 local_control.reqseq = chan->buffer_seq; 2743 l2cap_send_sframe(chan, &local_control); 2744 2745 chan->retry_count = 1; 2746 __set_monitor_timer(chan); 2747 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2748 } 2749 break; 2750 case L2CAP_EV_RECV_REQSEQ_AND_FBIT: 2751 l2cap_process_reqseq(chan, control->reqseq); 2752 break; 2753 case L2CAP_EV_EXPLICIT_POLL: 2754 l2cap_send_rr_or_rnr(chan, 1); 2755 chan->retry_count = 1; 2756 __set_monitor_timer(chan); 2757 __clear_ack_timer(chan); 2758 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2759 break; 2760 case L2CAP_EV_RETRANS_TO: 2761 l2cap_send_rr_or_rnr(chan, 1); 2762 chan->retry_count = 1; 2763 __set_monitor_timer(chan); 2764 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2765 break; 2766 case L2CAP_EV_RECV_FBIT: 2767 /* Nothing to process */ 2768 break; 2769 default: 2770 break; 2771 } 2772 } 2773 2774 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, 2775 struct l2cap_ctrl *control, 2776 struct sk_buff_head *skbs, u8 event) 2777 { 2778 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, 2779 event); 2780 2781 switch (event) { 2782 case L2CAP_EV_DATA_REQUEST: 2783 if (chan->tx_send_head == NULL) 2784 chan->tx_send_head = skb_peek(skbs); 2785 /* Queue 
data, but don't send. */ 2786 skb_queue_splice_tail_init(skbs, &chan->tx_q); 2787 break; 2788 case L2CAP_EV_LOCAL_BUSY_DETECTED: 2789 BT_DBG("Enter LOCAL_BUSY"); 2790 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2791 2792 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 2793 /* The SREJ_SENT state must be aborted if we are to 2794 * enter the LOCAL_BUSY state. 2795 */ 2796 l2cap_abort_rx_srej_sent(chan); 2797 } 2798 2799 l2cap_send_ack(chan); 2800 2801 break; 2802 case L2CAP_EV_LOCAL_BUSY_CLEAR: 2803 BT_DBG("Exit LOCAL_BUSY"); 2804 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2805 2806 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { 2807 struct l2cap_ctrl local_control; 2808 memset(&local_control, 0, sizeof(local_control)); 2809 local_control.sframe = 1; 2810 local_control.super = L2CAP_SUPER_RR; 2811 local_control.poll = 1; 2812 local_control.reqseq = chan->buffer_seq; 2813 l2cap_send_sframe(chan, &local_control); 2814 2815 chan->retry_count = 1; 2816 __set_monitor_timer(chan); 2817 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2818 } 2819 break; 2820 case L2CAP_EV_RECV_REQSEQ_AND_FBIT: 2821 l2cap_process_reqseq(chan, control->reqseq); 2822 fallthrough; 2823 2824 case L2CAP_EV_RECV_FBIT: 2825 if (control && control->final) { 2826 __clear_monitor_timer(chan); 2827 if (chan->unacked_frames > 0) 2828 __set_retrans_timer(chan); 2829 chan->retry_count = 0; 2830 chan->tx_state = L2CAP_TX_STATE_XMIT; 2831 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); 2832 } 2833 break; 2834 case L2CAP_EV_EXPLICIT_POLL: 2835 /* Ignore */ 2836 break; 2837 case L2CAP_EV_MONITOR_TO: 2838 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { 2839 l2cap_send_rr_or_rnr(chan, 1); 2840 __set_monitor_timer(chan); 2841 chan->retry_count++; 2842 } else { 2843 l2cap_send_disconn_req(chan, ECONNABORTED); 2844 } 2845 break; 2846 default: 2847 break; 2848 } 2849 } 2850 2851 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 2852 struct sk_buff_head *skbs, u8 event) 
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	/* Dispatch a transmit-side event to the handler for the current
	 * ERTM tx state; events arriving in any other state are dropped.
	 */
	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}

/* Feed the reqseq acknowledgement and F-bit of a received frame into the
 * tx state machine (no outgoing data involved).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

/* Feed only the F-bit of a received frame into the tx state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* A clone failure for one channel is not fatal; keep
		 * delivering to the remaining raw channels.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */

/* Build a signalling command PDU (L2CAP header + cmd header + payload),
 * spilling payload that exceeds the connection MTU into frag_list
 * fragments.  Returns NULL if allocation fails or the MTU cannot even
 * hold the command header.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu <
L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) 2925 return NULL; 2926 2927 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2928 count = min_t(unsigned int, conn->mtu, len); 2929 2930 skb = bt_skb_alloc(count, GFP_KERNEL); 2931 if (!skb) 2932 return NULL; 2933 2934 lh = skb_put(skb, L2CAP_HDR_SIZE); 2935 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 2936 2937 if (conn->hcon->type == LE_LINK) 2938 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 2939 else 2940 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 2941 2942 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); 2943 cmd->code = code; 2944 cmd->ident = ident; 2945 cmd->len = cpu_to_le16(dlen); 2946 2947 if (dlen) { 2948 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; 2949 skb_put_data(skb, data, count); 2950 data += count; 2951 } 2952 2953 len -= skb->len; 2954 2955 /* Continuation fragments (no L2CAP header) */ 2956 frag = &skb_shinfo(skb)->frag_list; 2957 while (len) { 2958 count = min_t(unsigned int, conn->mtu, len); 2959 2960 *frag = bt_skb_alloc(count, GFP_KERNEL); 2961 if (!*frag) 2962 goto fail; 2963 2964 skb_put_data(*frag, data, count); 2965 2966 len -= count; 2967 data += count; 2968 2969 frag = &(*frag)->next; 2970 } 2971 2972 return skb; 2973 2974 fail: 2975 kfree_skb(skb); 2976 return NULL; 2977 } 2978 2979 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, 2980 unsigned long *val) 2981 { 2982 struct l2cap_conf_opt *opt = *ptr; 2983 int len; 2984 2985 len = L2CAP_CONF_OPT_SIZE + opt->len; 2986 *ptr += len; 2987 2988 *type = opt->type; 2989 *olen = opt->len; 2990 2991 switch (opt->len) { 2992 case 1: 2993 *val = *((u8 *) opt->val); 2994 break; 2995 2996 case 2: 2997 *val = get_unaligned_le16(opt->val); 2998 break; 2999 3000 case 4: 3001 *val = get_unaligned_le32(opt->val); 3002 break; 3003 3004 default: 3005 *val = (unsigned long) opt->val; 3006 break; 3007 } 3008 3009 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); 3010 return len; 3011 } 3012 3013 static void l2cap_add_conf_opt(void 
			       **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Silently drop the option rather than overrun the buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	/* 1/2/4-byte values are stored inline (little-endian); anything
	 * else treats val as a pointer to len bytes of option payload.
	 */
	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	/* Advance the caller's write cursor past this option */
	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

/* Append an EFS (extended flow specification) option built from the
 * channel's local QoS parameters.  Streaming mode advertises id 1 /
 * best-effort with latency and flush timeout zeroed; modes other than
 * ERTM and streaming add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}

/* Delayed work: acknowledge received I-frames that have not yet been
 * acked (buffer_seq has advanced past last_acked_seq) by sending an
 * RR/RNR S-frame.  Drops a channel reference — presumably the one taken
 * when the ack timer was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Reset a channel's ERTM/streaming transmit and receive bookkeeping.
 * For ERTM, additionally initialize the rx/tx state machines and
 * allocate the SREJ and retransmission sequence lists (sized by the
 * local and remote tx windows respectively).  Returns 0 on success or
 * a negative errno from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej_list allocated above */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}

/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		fallthrough;
	default:
		return L2CAP_MODE_BASIC;
	}
}

/* Remote advertises support for the extended window size option */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}

/* Remote advertises support for extended flow specification */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}

/* Fill in the default ERTM retransmission and monitor timeouts */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}

static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
3169 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3170 } else { 3171 chan->tx_win = min_t(u16, chan->tx_win, 3172 L2CAP_DEFAULT_TX_WINDOW); 3173 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 3174 } 3175 chan->ack_win = chan->tx_win; 3176 } 3177 3178 static void l2cap_mtu_auto(struct l2cap_chan *chan) 3179 { 3180 struct hci_conn *conn = chan->conn->hcon; 3181 3182 chan->imtu = L2CAP_DEFAULT_MIN_MTU; 3183 3184 /* The 2-DH1 packet has between 2 and 56 information bytes 3185 * (including the 2-byte payload header) 3186 */ 3187 if (!(conn->pkt_type & HCI_2DH1)) 3188 chan->imtu = 54; 3189 3190 /* The 3-DH1 packet has between 2 and 85 information bytes 3191 * (including the 2-byte payload header) 3192 */ 3193 if (!(conn->pkt_type & HCI_3DH1)) 3194 chan->imtu = 83; 3195 3196 /* The 2-DH3 packet has between 2 and 369 information bytes 3197 * (including the 2-byte payload header) 3198 */ 3199 if (!(conn->pkt_type & HCI_2DH3)) 3200 chan->imtu = 367; 3201 3202 /* The 3-DH3 packet has between 2 and 554 information bytes 3203 * (including the 2-byte payload header) 3204 */ 3205 if (!(conn->pkt_type & HCI_3DH3)) 3206 chan->imtu = 552; 3207 3208 /* The 2-DH5 packet has between 2 and 681 information bytes 3209 * (including the 2-byte payload header) 3210 */ 3211 if (!(conn->pkt_type & HCI_2DH5)) 3212 chan->imtu = 679; 3213 3214 /* The 3-DH5 packet has between 2 and 1023 information bytes 3215 * (including the 2-byte payload header) 3216 */ 3217 if (!(conn->pkt_type & HCI_3DH5)) 3218 chan->imtu = 1021; 3219 } 3220 3221 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) 3222 { 3223 struct l2cap_conf_req *req = data; 3224 struct l2cap_conf_rfc rfc = { .mode = chan->mode }; 3225 void *ptr = req->data; 3226 void *endptr = data + data_size; 3227 u16 size; 3228 3229 BT_DBG("chan %p", chan); 3230 3231 if (chan->num_conf_req || chan->num_conf_rsp) 3232 goto done; 3233 3234 switch (chan->mode) { 3235 case L2CAP_MODE_STREAMING: 3236 case L2CAP_MODE_ERTM: 3237 
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) 3238 break; 3239 3240 if (__l2cap_efs_supported(chan->conn)) 3241 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3242 3243 fallthrough; 3244 default: 3245 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); 3246 break; 3247 } 3248 3249 done: 3250 if (chan->imtu != L2CAP_DEFAULT_MTU) { 3251 if (!chan->imtu) 3252 l2cap_mtu_auto(chan); 3253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, 3254 endptr - ptr); 3255 } 3256 3257 switch (chan->mode) { 3258 case L2CAP_MODE_BASIC: 3259 if (disable_ertm) 3260 break; 3261 3262 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3263 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3264 break; 3265 3266 rfc.mode = L2CAP_MODE_BASIC; 3267 rfc.txwin_size = 0; 3268 rfc.max_transmit = 0; 3269 rfc.retrans_timeout = 0; 3270 rfc.monitor_timeout = 0; 3271 rfc.max_pdu_size = 0; 3272 3273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3274 (unsigned long) &rfc, endptr - ptr); 3275 break; 3276 3277 case L2CAP_MODE_ERTM: 3278 rfc.mode = L2CAP_MODE_ERTM; 3279 rfc.max_transmit = chan->max_tx; 3280 3281 __l2cap_set_ertm_timeouts(chan, &rfc); 3282 3283 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3284 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3285 L2CAP_FCS_SIZE); 3286 rfc.max_pdu_size = cpu_to_le16(size); 3287 3288 l2cap_txwin_setup(chan); 3289 3290 rfc.txwin_size = min_t(u16, chan->tx_win, 3291 L2CAP_DEFAULT_TX_WINDOW); 3292 3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3294 (unsigned long) &rfc, endptr - ptr); 3295 3296 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3297 l2cap_add_opt_efs(&ptr, chan, endptr - ptr); 3298 3299 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3301 chan->tx_win, endptr - ptr); 3302 3303 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3304 if (chan->fcs == L2CAP_FCS_NONE || 3305 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3306 chan->fcs = L2CAP_FCS_NONE; 3307 
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3308 chan->fcs, endptr - ptr); 3309 } 3310 break; 3311 3312 case L2CAP_MODE_STREAMING: 3313 l2cap_txwin_setup(chan); 3314 rfc.mode = L2CAP_MODE_STREAMING; 3315 rfc.txwin_size = 0; 3316 rfc.max_transmit = 0; 3317 rfc.retrans_timeout = 0; 3318 rfc.monitor_timeout = 0; 3319 3320 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3321 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3322 L2CAP_FCS_SIZE); 3323 rfc.max_pdu_size = cpu_to_le16(size); 3324 3325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3326 (unsigned long) &rfc, endptr - ptr); 3327 3328 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3329 l2cap_add_opt_efs(&ptr, chan, endptr - ptr); 3330 3331 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3332 if (chan->fcs == L2CAP_FCS_NONE || 3333 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3334 chan->fcs = L2CAP_FCS_NONE; 3335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3336 chan->fcs, endptr - ptr); 3337 } 3338 break; 3339 } 3340 3341 req->dcid = cpu_to_le16(chan->dcid); 3342 req->flags = cpu_to_le16(0); 3343 3344 return ptr - data; 3345 } 3346 3347 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) 3348 { 3349 struct l2cap_conf_rsp *rsp = data; 3350 void *ptr = rsp->data; 3351 void *endptr = data + data_size; 3352 void *req = chan->conf_req; 3353 int len = chan->conf_len; 3354 int type, hint, olen; 3355 unsigned long val; 3356 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 3357 struct l2cap_conf_efs efs; 3358 u8 remote_efs = 0; 3359 u16 mtu = L2CAP_DEFAULT_MTU; 3360 u16 result = L2CAP_CONF_SUCCESS; 3361 u16 size; 3362 3363 BT_DBG("chan %p", chan); 3364 3365 while (len >= L2CAP_CONF_OPT_SIZE) { 3366 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 3367 if (len < 0) 3368 break; 3369 3370 hint = type & L2CAP_CONF_HINT; 3371 type &= L2CAP_CONF_MASK; 3372 3373 switch (type) { 3374 case L2CAP_CONF_MTU: 3375 if (olen != 2) 3376 break; 3377 mtu = val; 3378 break; 3379 
3380 case L2CAP_CONF_FLUSH_TO: 3381 if (olen != 2) 3382 break; 3383 chan->flush_to = val; 3384 break; 3385 3386 case L2CAP_CONF_QOS: 3387 break; 3388 3389 case L2CAP_CONF_RFC: 3390 if (olen != sizeof(rfc)) 3391 break; 3392 memcpy(&rfc, (void *) val, olen); 3393 break; 3394 3395 case L2CAP_CONF_FCS: 3396 if (olen != 1) 3397 break; 3398 if (val == L2CAP_FCS_NONE) 3399 set_bit(CONF_RECV_NO_FCS, &chan->conf_state); 3400 break; 3401 3402 case L2CAP_CONF_EFS: 3403 if (olen != sizeof(efs)) 3404 break; 3405 remote_efs = 1; 3406 memcpy(&efs, (void *) val, olen); 3407 break; 3408 3409 case L2CAP_CONF_EWS: 3410 if (olen != 2) 3411 break; 3412 return -ECONNREFUSED; 3413 3414 default: 3415 if (hint) 3416 break; 3417 result = L2CAP_CONF_UNKNOWN; 3418 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); 3419 break; 3420 } 3421 } 3422 3423 if (chan->num_conf_rsp || chan->num_conf_req > 1) 3424 goto done; 3425 3426 switch (chan->mode) { 3427 case L2CAP_MODE_STREAMING: 3428 case L2CAP_MODE_ERTM: 3429 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 3430 chan->mode = l2cap_select_mode(rfc.mode, 3431 chan->conn->feat_mask); 3432 break; 3433 } 3434 3435 if (remote_efs) { 3436 if (__l2cap_efs_supported(chan->conn)) 3437 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3438 else 3439 return -ECONNREFUSED; 3440 } 3441 3442 if (chan->mode != rfc.mode) 3443 return -ECONNREFUSED; 3444 3445 break; 3446 } 3447 3448 done: 3449 if (chan->mode != rfc.mode) { 3450 result = L2CAP_CONF_UNACCEPT; 3451 rfc.mode = chan->mode; 3452 3453 if (chan->num_conf_rsp == 1) 3454 return -ECONNREFUSED; 3455 3456 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3457 (unsigned long) &rfc, endptr - ptr); 3458 } 3459 3460 if (result == L2CAP_CONF_SUCCESS) { 3461 /* Configure output options and let the other side know 3462 * which ones we don't like. 
*/ 3463 3464 if (mtu < L2CAP_DEFAULT_MIN_MTU) 3465 result = L2CAP_CONF_UNACCEPT; 3466 else { 3467 chan->omtu = mtu; 3468 set_bit(CONF_MTU_DONE, &chan->conf_state); 3469 } 3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); 3471 3472 if (remote_efs) { 3473 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3474 efs.stype != L2CAP_SERV_NOTRAFIC && 3475 efs.stype != chan->local_stype) { 3476 3477 result = L2CAP_CONF_UNACCEPT; 3478 3479 if (chan->num_conf_req >= 1) 3480 return -ECONNREFUSED; 3481 3482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3483 sizeof(efs), 3484 (unsigned long) &efs, endptr - ptr); 3485 } else { 3486 /* Send PENDING Conf Rsp */ 3487 result = L2CAP_CONF_PENDING; 3488 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3489 } 3490 } 3491 3492 switch (rfc.mode) { 3493 case L2CAP_MODE_BASIC: 3494 chan->fcs = L2CAP_FCS_NONE; 3495 set_bit(CONF_MODE_DONE, &chan->conf_state); 3496 break; 3497 3498 case L2CAP_MODE_ERTM: 3499 if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) 3500 chan->remote_tx_win = rfc.txwin_size; 3501 else 3502 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; 3503 3504 chan->remote_max_tx = rfc.max_transmit; 3505 3506 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3507 chan->conn->mtu - L2CAP_EXT_HDR_SIZE - 3508 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); 3509 rfc.max_pdu_size = cpu_to_le16(size); 3510 chan->remote_mps = size; 3511 3512 __l2cap_set_ertm_timeouts(chan, &rfc); 3513 3514 set_bit(CONF_MODE_DONE, &chan->conf_state); 3515 3516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3517 sizeof(rfc), (unsigned long) &rfc, endptr - ptr); 3518 3519 if (remote_efs && 3520 test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3521 chan->remote_id = efs.id; 3522 chan->remote_stype = efs.stype; 3523 chan->remote_msdu = le16_to_cpu(efs.msdu); 3524 chan->remote_flush_to = 3525 le32_to_cpu(efs.flush_to); 3526 chan->remote_acc_lat = 3527 le32_to_cpu(efs.acc_lat); 3528 chan->remote_sdu_itime = 3529 le32_to_cpu(efs.sdu_itime); 3530 l2cap_add_conf_opt(&ptr, 
L2CAP_CONF_EFS,
					   sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
		}
		break;

	case L2CAP_MODE_STREAMING:
		/* Clamp the PDU size so a full streaming-mode PDU (extended
		 * header + SDU length + FCS) still fits in the link MTU.
		 */
		size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
			     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);
		chan->remote_mps = size;

		set_bit(CONF_MODE_DONE, &chan->conf_state);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		break;

	default:
		/* Unsupported mode: reject and echo back our mode in the
		 * RFC option so the peer can retry with it.
		 */
		result = L2CAP_CONF_UNACCEPT;

		memset(&rfc, 0, sizeof(rfc));
		rfc.mode = chan->mode;
	}

	if (result == L2CAP_CONF_SUCCESS)
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}

/* Parse the options carried in a Configure Response from the peer and
 * build a fresh Configure Request in @data (up to @size bytes) with our
 * adjusted values.  *@result carries the response result in and may be
 * overwritten (e.g. MTU below minimum forces L2CAP_CONF_UNACCEPT).
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the peer's RFC mode or EFS service type is incompatible with ours.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Options with an unexpected length are skipped */
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device must not let the peer change
			 * the negotiated mode.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must match ours unless either side
			 * requested "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* In basic mode the peer must not have proposed another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}

/* Write a minimal Configure Response header (no options) into @data and
 * return its length.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}

/* Send the deferred LE Credit Based Connection Response for a channel
 * that was accepted after FLAG_DEFER_SETUP processing.
 */
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

/* Per-channel callback used to decide whether an ECRED response can be
 * sent yet: counts channels still pending accept in *@data, or flags
 * -ECONNREFUSED if any channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	switch (chan->state) {
	case BT_CONNECT2:
		/* If channel still pending accept add to result */
		(*result)++;
		return;
	case BT_CONNECTED:
		return;
	default:
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
		return;
	}
}

/* Accumulator for building a single ECRED Connection Response covering
 * up to L2CAP_ECRED_MAX_CID channels.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};

/* Per-channel callback that appends an accepted channel's CID to the
 * pending ECRED response, or tears the channel down on failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;

	if
 (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}

/* Send the deferred Enhanced Credit Based Connection Response once every
 * channel sharing @chan's signaling ident has either connected or been
 * refused.  Bails out silently while any sibling channel is still in
 * BT_CONNECT2 (pending accept).
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}

/* Send the deferred BR/EDR Connection Response (success) and, if not
 * already done, kick off configuration by sending the first Configure
 * Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller triggers the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}

/* Extract RFC (and extended window size) parameters from a successful
 * Configure Response and apply them to the channel.  Only meaningful in
 * ERTM or streaming mode.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option supersedes the
		 * RFC tx window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

/* Handle an incoming Command Reject: if it matches our outstanding
 * Information Request, stop waiting and proceed with connection setup.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

/* Handle an incoming BR/EDR Connection Request: validate the PSM, link
 * security and requested source CID, create a new channel from the
 * listening parent and send a Connection Response with @rsp_code.
 *
 * Returns the new channel (or NULL if the request was rejected).  Note
 * the locking order: conn->chan_lock is taken before the parent channel
 * lock, and both are dropped before the response is sent.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending" and query
		 * the peer's features below.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}

/* Signaling entry point for L2CAP_CONN_REQ: notify MGMT of the incoming
 * connection and hand off to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev,
 HCI_MGMT))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}

/* Handle an incoming BR/EDR Connection Response: look up the channel by
 * source CID (or by signaling ident if scid is zero), and on success move
 * it to BT_CONFIG and send the first Configure Request.  On any result
 * other than success/pending the channel is deleted.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dcid in the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference only if the channel is not already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already in use on this connection */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

/* Send a successful Configure Response carrying EFS-related state, and
 * mark local configuration output as done.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}

/* Send a Command Reject with reason "invalid CID" for the given pair */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

/* Handle an incoming Configure Request: accumulate option data across
 * continuation packets in chan->conf_req, then parse the complete config,
 * send the response and, when both directions are configured, bring the
 * channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS, init ERTM if needed
	 * and mark the channel ready.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}

/* Handle an incoming Configure Response: apply accepted parameters, retry
 * with a new Configure Request on UNKNOWN/UNACCEPT (bounded by
 * L2CAP_CONF_MAX_CONF_RSP), or disconnect on hard failure.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND,
 &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}

/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * channel down and delete it.  Note the lock dance: the channel lock is
 * dropped and re-taken so conn->chan_lock can be acquired first,
 * preserving the conn-before-chan lock order used elsewhere.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle an incoming Disconnection Response: delete the channel if it is
 * in BT_DISCONN (i.e. we initiated the disconnect); otherwise ignore.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Re-order locks to take conn->chan_lock before the channel lock */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries; reject any other type as not supported.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Remaining 7 octets of the fixed channel mask are zero */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}

/* Handle an incoming Information Response during connection setup:
 * record the peer's feature mask / fixed channels and continue with
 * channel establishment.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channels query before
			 * starting channels.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}

/* Handle an LE Connection Parameter Update Request (central role only):
 * validate the proposed parameters, respond, and on acceptance apply them
 * via HCI and notify MGMT.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min           = __le16_to_cpu(req->min);
	max           = __le16_to_cpu(req->max);
	latency       = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	if (max > hcon->le_conn_max_interval) {
		BT_DBG("requested connection interval exceeds current bounds.");
		err = -EINVAL;
	} else {
		err = hci_check_conn_params(min, max, latency, to_multiplier);
	}

	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}

/* Handle an LE Credit Based Connection Response: on success record the
 * peer's parameters and mark the channel ready; on an authentication or
 * encryption failure raise our security level and retry via SMP; any
 * other result deletes the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* Spec minimums: MTU/MPS >= 23, dcid in the LE dynamic range */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

/* Dispatch a single BR/EDR signaling command to its handler */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case
L2CAP_ECHO_REQ: 4783 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); 4784 break; 4785 4786 case L2CAP_ECHO_RSP: 4787 break; 4788 4789 case L2CAP_INFO_REQ: 4790 err = l2cap_information_req(conn, cmd, cmd_len, data); 4791 break; 4792 4793 case L2CAP_INFO_RSP: 4794 l2cap_information_rsp(conn, cmd, cmd_len, data); 4795 break; 4796 4797 default: 4798 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); 4799 err = -EINVAL; 4800 break; 4801 } 4802 4803 return err; 4804 } 4805 4806 static int l2cap_le_connect_req(struct l2cap_conn *conn, 4807 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4808 u8 *data) 4809 { 4810 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; 4811 struct l2cap_le_conn_rsp rsp; 4812 struct l2cap_chan *chan, *pchan; 4813 u16 dcid, scid, credits, mtu, mps; 4814 __le16 psm; 4815 u8 result; 4816 4817 if (cmd_len != sizeof(*req)) 4818 return -EPROTO; 4819 4820 scid = __le16_to_cpu(req->scid); 4821 mtu = __le16_to_cpu(req->mtu); 4822 mps = __le16_to_cpu(req->mps); 4823 psm = req->psm; 4824 dcid = 0; 4825 credits = 0; 4826 4827 if (mtu < 23 || mps < 23) 4828 return -EPROTO; 4829 4830 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), 4831 scid, mtu, mps); 4832 4833 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A 4834 * page 1059: 4835 * 4836 * Valid range: 0x0001-0x00ff 4837 * 4838 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges 4839 */ 4840 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { 4841 result = L2CAP_CR_LE_BAD_PSM; 4842 chan = NULL; 4843 goto response; 4844 } 4845 4846 /* Check if we have socket listening on psm */ 4847 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 4848 &conn->hcon->dst, LE_LINK); 4849 if (!pchan) { 4850 result = L2CAP_CR_LE_BAD_PSM; 4851 chan = NULL; 4852 goto response; 4853 } 4854 4855 mutex_lock(&conn->chan_lock); 4856 l2cap_chan_lock(pchan); 4857 4858 if (!smp_sufficient_security(conn->hcon, pchan->sec_level, 4859 
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the request and the ACL link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by the socket layer */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}

/* Handle an L2CAP_LE_CREDITS packet: add the peer-granted credits to
 * the channel's TX budget and resume transmission if possible.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* NOTE(review): l2cap_get_chan_by_dcid() appears to return the
	 * channel locked and held (see the unlock/put below) — defined
	 * elsewhere in this file.
	 */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Credits beyond 65535 total are a protocol violation */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
4973 */ 4974 goto unlock; 4975 } 4976 4977 chan->tx_credits += credits; 4978 4979 /* Resume sending */ 4980 l2cap_le_flowctl_send(chan); 4981 4982 if (chan->tx_credits) 4983 chan->ops->resume(chan); 4984 4985 unlock: 4986 l2cap_chan_unlock(chan); 4987 l2cap_chan_put(chan); 4988 4989 return 0; 4990 } 4991 4992 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, 4993 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4994 u8 *data) 4995 { 4996 struct l2cap_ecred_conn_req *req = (void *) data; 4997 struct { 4998 struct l2cap_ecred_conn_rsp rsp; 4999 __le16 dcid[L2CAP_ECRED_MAX_CID]; 5000 } __packed pdu; 5001 struct l2cap_chan *chan, *pchan; 5002 u16 mtu, mps; 5003 __le16 psm; 5004 u8 result, len = 0; 5005 int i, num_scid; 5006 bool defer = false; 5007 5008 if (!enable_ecred) 5009 return -EINVAL; 5010 5011 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { 5012 result = L2CAP_CR_LE_INVALID_PARAMS; 5013 goto response; 5014 } 5015 5016 cmd_len -= sizeof(*req); 5017 num_scid = cmd_len / sizeof(u16); 5018 5019 if (num_scid > ARRAY_SIZE(pdu.dcid)) { 5020 result = L2CAP_CR_LE_INVALID_PARAMS; 5021 goto response; 5022 } 5023 5024 mtu = __le16_to_cpu(req->mtu); 5025 mps = __le16_to_cpu(req->mps); 5026 5027 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { 5028 result = L2CAP_CR_LE_UNACCEPT_PARAMS; 5029 goto response; 5030 } 5031 5032 psm = req->psm; 5033 5034 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A 5035 * page 1059: 5036 * 5037 * Valid range: 0x0001-0x00ff 5038 * 5039 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges 5040 */ 5041 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { 5042 result = L2CAP_CR_LE_BAD_PSM; 5043 goto response; 5044 } 5045 5046 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); 5047 5048 memset(&pdu, 0, sizeof(pdu)); 5049 5050 /* Check if we have socket listening on psm */ 5051 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 5052 &conn->hcon->dst, 
					 LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Try to create one channel per requested SCID; individual
	 * failures leave the corresponding dcid[] entry at 0 and
	 * downgrade the overall result, but don't abort the loop.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Deferred setup sends its own response later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}

/* Handle the response to a previously sent enhanced credit-based
 * connection request: match pending channels by command ident and
 * either bring them up or tear them down per the result code.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
5202 * immediately discarded and not used. 5203 */ 5204 l2cap_chan_del(chan, ECONNREFUSED); 5205 l2cap_chan_unlock(chan); 5206 chan = __l2cap_get_chan_by_dcid(conn, dcid); 5207 l2cap_chan_lock(chan); 5208 l2cap_chan_del(chan, ECONNRESET); 5209 l2cap_chan_unlock(chan); 5210 continue; 5211 } 5212 5213 switch (result) { 5214 case L2CAP_CR_LE_AUTHENTICATION: 5215 case L2CAP_CR_LE_ENCRYPTION: 5216 /* If we already have MITM protection we can't do 5217 * anything. 5218 */ 5219 if (hcon->sec_level > BT_SECURITY_MEDIUM) { 5220 l2cap_chan_del(chan, ECONNREFUSED); 5221 break; 5222 } 5223 5224 sec_level = hcon->sec_level + 1; 5225 if (chan->sec_level < sec_level) 5226 chan->sec_level = sec_level; 5227 5228 /* We'll need to send a new Connect Request */ 5229 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags); 5230 5231 smp_conn_security(hcon, chan->sec_level); 5232 break; 5233 5234 case L2CAP_CR_LE_BAD_PSM: 5235 l2cap_chan_del(chan, ECONNREFUSED); 5236 break; 5237 5238 default: 5239 /* If dcid was not set it means channels was refused */ 5240 if (!dcid) { 5241 l2cap_chan_del(chan, ECONNREFUSED); 5242 break; 5243 } 5244 5245 chan->ident = 0; 5246 chan->dcid = dcid; 5247 chan->omtu = mtu; 5248 chan->remote_mps = mps; 5249 chan->tx_credits = credits; 5250 l2cap_chan_ready(chan); 5251 break; 5252 } 5253 5254 l2cap_chan_unlock(chan); 5255 } 5256 5257 mutex_unlock(&conn->chan_lock); 5258 5259 return err; 5260 } 5261 5262 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, 5263 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5264 u8 *data) 5265 { 5266 struct l2cap_ecred_reconf_req *req = (void *) data; 5267 struct l2cap_ecred_reconf_rsp rsp; 5268 u16 mtu, mps, result; 5269 struct l2cap_chan *chan; 5270 int i, num_scid; 5271 5272 if (!enable_ecred) 5273 return -EINVAL; 5274 5275 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { 5276 result = L2CAP_CR_LE_INVALID_PARAMS; 5277 goto respond; 5278 } 5279 5280 mtu = __le16_to_cpu(req->mtu); 5281 mps = 
__le16_to_cpu(req->mps); 5282 5283 BT_DBG("mtu %u mps %u", mtu, mps); 5284 5285 if (mtu < L2CAP_ECRED_MIN_MTU) { 5286 result = L2CAP_RECONF_INVALID_MTU; 5287 goto respond; 5288 } 5289 5290 if (mps < L2CAP_ECRED_MIN_MPS) { 5291 result = L2CAP_RECONF_INVALID_MPS; 5292 goto respond; 5293 } 5294 5295 cmd_len -= sizeof(*req); 5296 num_scid = cmd_len / sizeof(u16); 5297 result = L2CAP_RECONF_SUCCESS; 5298 5299 for (i = 0; i < num_scid; i++) { 5300 u16 scid; 5301 5302 scid = __le16_to_cpu(req->scid[i]); 5303 if (!scid) 5304 return -EPROTO; 5305 5306 chan = __l2cap_get_chan_by_dcid(conn, scid); 5307 if (!chan) 5308 continue; 5309 5310 /* If the MTU value is decreased for any of the included 5311 * channels, then the receiver shall disconnect all 5312 * included channels. 5313 */ 5314 if (chan->omtu > mtu) { 5315 BT_ERR("chan %p decreased MTU %u -> %u", chan, 5316 chan->omtu, mtu); 5317 result = L2CAP_RECONF_INVALID_MTU; 5318 } 5319 5320 chan->omtu = mtu; 5321 chan->remote_mps = mps; 5322 } 5323 5324 respond: 5325 rsp.result = cpu_to_le16(result); 5326 5327 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), 5328 &rsp); 5329 5330 return 0; 5331 } 5332 5333 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, 5334 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5335 u8 *data) 5336 { 5337 struct l2cap_chan *chan, *tmp; 5338 struct l2cap_ecred_conn_rsp *rsp = (void *) data; 5339 u16 result; 5340 5341 if (cmd_len < sizeof(*rsp)) 5342 return -EPROTO; 5343 5344 result = __le16_to_cpu(rsp->result); 5345 5346 BT_DBG("result 0x%4.4x", rsp->result); 5347 5348 if (!result) 5349 return 0; 5350 5351 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { 5352 if (chan->ident != cmd->ident) 5353 continue; 5354 5355 l2cap_chan_del(chan, ECONNRESET); 5356 } 5357 5358 return 0; 5359 } 5360 5361 static inline int l2cap_le_command_rej(struct l2cap_conn *conn, 5362 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5363 u8 *data) 5364 { 5365 struct l2cap_cmd_rej_unk *rej = 
	    (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference; bail out if the channel is already dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}

/* Dispatch one LE signaling command to its handler.  Unknown opcodes
 * return -EINVAL so the caller sends an L2CAP_COMMAND_REJ.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Receive path for the LE signaling channel: exactly one command per
 * frame.  Malformed frames are silently dropped; handler errors cause
 * a command reject.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): this message is misleading — err here is
		 * any handler failure, not specifically a link-type
		 * mismatch.  Text kept as-is to preserve log output.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}

/* Send a "command not understood" reject with the given ident */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

/* Receive path for the BR/EDR signaling channel: a frame may carry
 * multiple concatenated commands, processed in order.  Consumes the
 * skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			/* Skip the bogus command body and keep parsing */
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): message text is misleading (any
			 * handler error lands here); kept as-is.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes smaller than a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}

/* Verify (and strip) the CRC16 FCS on an ERTM/streaming frame.
 * Returns 0 on match or when no FCS is in use, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header too; it sits at the very end */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}

/* Answer a poll (P-bit) by sending pending I-frames or an RR/RNR
 * S-frame carrying the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}

/* Append new_frag to skb's frag_list, keeping *last_frag pointing at
 * the tail so appends stay O(1), and update the aggregate lengths.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}

/* Reassemble a segmented SDU from its SAR-tagged fragments.  Takes
 * ownership of skb: it is either delivered, parked in chan->sdu, or
 * freed on error.  Returns 0 on progress/delivery, negative on error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly already in progress makes this invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment may not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrun of the announced SDU length is invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the length announced in SAR_START */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the current fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}

static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

/* Feed a local-busy transition into the ERTM TX state machine.
 * No-op for non-ERTM channels.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}

static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* SREJ queue drained: back to normal receive state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}

/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, enforcing the retry limit and F/P-bit bookkeeping.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next unsent seq is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only skip the retransmit if this F-bit answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}

/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, enforcing the retry limit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if
	    (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}

/* Classify a received I-frame's txseq relative to the expected
 * sequence, the tx window and any outstanding SREJs.  The returned
 * L2CAP_TXSEQ_* code drives the RX state machines below.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}

/* ERTM RX state machine, normal receive state.  Takes ownership of
 * skb: it is consumed by reassembly/queueing or freed before return.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 * l2cap_reassemble_sdu
			 * chan->ops->recv == l2cap_sock_recv_cb
			 * __sock_queue_rcv_skb
			 * Another thread calls:
			 * bt_sock_recvmsg
			 * skb_recv_datagram
			 * skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

/* ERTM RX state machine while one or more SREJs are outstanding:
 * in-order and SREJ-requested frames are parked on srej_q until the
 * gaps are filled.  Takes ownership of skb.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* A gap may have closed; flush what we can */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

static int l2cap_finish_move(struct l2cap_chan *chan)
{
BT_DBG("chan %p", chan); 6240 6241 chan->rx_state = L2CAP_RX_STATE_RECV; 6242 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; 6243 6244 return l2cap_resegment(chan); 6245 } 6246 6247 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, 6248 struct l2cap_ctrl *control, 6249 struct sk_buff *skb, u8 event) 6250 { 6251 int err; 6252 6253 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, 6254 event); 6255 6256 if (!control->poll) 6257 return -EPROTO; 6258 6259 l2cap_process_reqseq(chan, control->reqseq); 6260 6261 if (!skb_queue_empty(&chan->tx_q)) 6262 chan->tx_send_head = skb_peek(&chan->tx_q); 6263 else 6264 chan->tx_send_head = NULL; 6265 6266 /* Rewind next_tx_seq to the point expected 6267 * by the receiver. 6268 */ 6269 chan->next_tx_seq = control->reqseq; 6270 chan->unacked_frames = 0; 6271 6272 err = l2cap_finish_move(chan); 6273 if (err) 6274 return err; 6275 6276 set_bit(CONN_SEND_FBIT, &chan->conn_state); 6277 l2cap_send_i_or_rr_or_rnr(chan); 6278 6279 if (event == L2CAP_EV_RECV_IFRAME) 6280 return -EPROTO; 6281 6282 return l2cap_rx_state_recv(chan, control, NULL, event); 6283 } 6284 6285 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, 6286 struct l2cap_ctrl *control, 6287 struct sk_buff *skb, u8 event) 6288 { 6289 int err; 6290 6291 if (!control->final) 6292 return -EPROTO; 6293 6294 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 6295 6296 chan->rx_state = L2CAP_RX_STATE_RECV; 6297 l2cap_process_reqseq(chan, control->reqseq); 6298 6299 if (!skb_queue_empty(&chan->tx_q)) 6300 chan->tx_send_head = skb_peek(&chan->tx_q); 6301 else 6302 chan->tx_send_head = NULL; 6303 6304 /* Rewind next_tx_seq to the point expected 6305 * by the receiver. 
6306 */ 6307 chan->next_tx_seq = control->reqseq; 6308 chan->unacked_frames = 0; 6309 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; 6310 6311 err = l2cap_resegment(chan); 6312 6313 if (!err) 6314 err = l2cap_rx_state_recv(chan, control, skb, event); 6315 6316 return err; 6317 } 6318 6319 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) 6320 { 6321 /* Make sure reqseq is for a packet that has been sent but not acked */ 6322 u16 unacked; 6323 6324 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); 6325 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; 6326 } 6327 6328 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 6329 struct sk_buff *skb, u8 event) 6330 { 6331 int err = 0; 6332 6333 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, 6334 control, skb, event, chan->rx_state); 6335 6336 if (__valid_reqseq(chan, control->reqseq)) { 6337 switch (chan->rx_state) { 6338 case L2CAP_RX_STATE_RECV: 6339 err = l2cap_rx_state_recv(chan, control, skb, event); 6340 break; 6341 case L2CAP_RX_STATE_SREJ_SENT: 6342 err = l2cap_rx_state_srej_sent(chan, control, skb, 6343 event); 6344 break; 6345 case L2CAP_RX_STATE_WAIT_P: 6346 err = l2cap_rx_state_wait_p(chan, control, skb, event); 6347 break; 6348 case L2CAP_RX_STATE_WAIT_F: 6349 err = l2cap_rx_state_wait_f(chan, control, skb, event); 6350 break; 6351 default: 6352 /* shut it down */ 6353 break; 6354 } 6355 } else { 6356 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", 6357 control->reqseq, chan->next_tx_seq, 6358 chan->expected_ack_seq); 6359 l2cap_send_disconn_req(chan, ECONNRESET); 6360 } 6361 6362 return err; 6363 } 6364 6365 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 6366 struct sk_buff *skb) 6367 { 6368 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store 6369 * the txseq field in advance to use it after l2cap_reassemble_sdu 6370 * returns and to avoid the 
race condition, for example: 6371 * 6372 * The current thread calls: 6373 * l2cap_reassemble_sdu 6374 * chan->ops->recv == l2cap_sock_recv_cb 6375 * __sock_queue_rcv_skb 6376 * Another thread calls: 6377 * bt_sock_recvmsg 6378 * skb_recv_datagram 6379 * skb_free_datagram 6380 * Then the current thread tries to access control, but it was freed by 6381 * skb_free_datagram. 6382 */ 6383 u16 txseq = control->txseq; 6384 6385 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, 6386 chan->rx_state); 6387 6388 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { 6389 l2cap_pass_to_tx(chan, control); 6390 6391 BT_DBG("buffer_seq %u->%u", chan->buffer_seq, 6392 __next_seq(chan, chan->buffer_seq)); 6393 6394 chan->buffer_seq = __next_seq(chan, chan->buffer_seq); 6395 6396 l2cap_reassemble_sdu(chan, skb, control); 6397 } else { 6398 if (chan->sdu) { 6399 kfree_skb(chan->sdu); 6400 chan->sdu = NULL; 6401 } 6402 chan->sdu_last_frag = NULL; 6403 chan->sdu_len = 0; 6404 6405 if (skb) { 6406 BT_DBG("Freeing %p", skb); 6407 kfree_skb(skb); 6408 } 6409 } 6410 6411 chan->last_acked_seq = txseq; 6412 chan->expected_tx_seq = __next_seq(chan, txseq); 6413 6414 return 0; 6415 } 6416 6417 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 6418 { 6419 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; 6420 u16 len; 6421 u8 event; 6422 6423 __unpack_control(chan, skb); 6424 6425 len = skb->len; 6426 6427 /* 6428 * We can just drop the corrupted I-frame here. 6429 * Receiver will miss it and start proper recovery 6430 * procedures and ask for retransmission. 
6431 */ 6432 if (l2cap_check_fcs(chan, skb)) 6433 goto drop; 6434 6435 if (!control->sframe && control->sar == L2CAP_SAR_START) 6436 len -= L2CAP_SDULEN_SIZE; 6437 6438 if (chan->fcs == L2CAP_FCS_CRC16) 6439 len -= L2CAP_FCS_SIZE; 6440 6441 if (len > chan->mps) { 6442 l2cap_send_disconn_req(chan, ECONNRESET); 6443 goto drop; 6444 } 6445 6446 if (chan->ops->filter) { 6447 if (chan->ops->filter(chan, skb)) 6448 goto drop; 6449 } 6450 6451 if (!control->sframe) { 6452 int err; 6453 6454 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", 6455 control->sar, control->reqseq, control->final, 6456 control->txseq); 6457 6458 /* Validate F-bit - F=0 always valid, F=1 only 6459 * valid in TX WAIT_F 6460 */ 6461 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) 6462 goto drop; 6463 6464 if (chan->mode != L2CAP_MODE_STREAMING) { 6465 event = L2CAP_EV_RECV_IFRAME; 6466 err = l2cap_rx(chan, control, skb, event); 6467 } else { 6468 err = l2cap_stream_rx(chan, control, skb); 6469 } 6470 6471 if (err) 6472 l2cap_send_disconn_req(chan, ECONNRESET); 6473 } else { 6474 const u8 rx_func_to_event[4] = { 6475 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, 6476 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ 6477 }; 6478 6479 /* Only I-frames are expected in streaming mode */ 6480 if (chan->mode == L2CAP_MODE_STREAMING) 6481 goto drop; 6482 6483 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", 6484 control->reqseq, control->final, control->poll, 6485 control->super); 6486 6487 if (len != 0) { 6488 BT_ERR("Trailing bytes: %d in sframe", len); 6489 l2cap_send_disconn_req(chan, ECONNRESET); 6490 goto drop; 6491 } 6492 6493 /* Validate F and P bits */ 6494 if (control->final && (control->poll || 6495 chan->tx_state != L2CAP_TX_STATE_WAIT_F)) 6496 goto drop; 6497 6498 event = rx_func_to_event[control->super]; 6499 if (l2cap_rx(chan, control, skb, event)) 6500 l2cap_send_disconn_req(chan, ECONNRESET); 6501 } 6502 6503 return 0; 6504 6505 drop: 6506 kfree_skb(skb); 6507 return 0; 6508 } 6509 
/* Return receive credits to the remote sender of an LE/enhanced
 * credit-based channel.  Tops the peer back up to (imtu / mps) + 1
 * credits whenever our remaining rx_credits fall below that level.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = (chan->imtu / chan->mps) + 1;

	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}

/* Deliver a fully reassembled SDU to the channel owner, then replenish
 * the sender's credits.  skb ownership passes to ops->recv.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}

/* Receive one PDU on an LE/enhanced credit-based channel: account a
 * credit, then either start a new SDU (first PDU carries the SDU length)
 * or append a fragment to the in-progress one.  Frees skbs internally on
 * error and always returns 0 past the initial validation, so the caller
 * never double-frees.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries a 2-byte SDU length prefix */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}

/* Route an incoming data frame to the channel identified by its source
 * CID, dispatching by channel mode.  Drops the frame for unknown CIDs,
 * disconnected channels or unsupported modes.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Deliver a connectionless (G-frame) packet to the global channel bound
 * to the given PSM on BR/EDR.  Stores the remote address/PSM in the skb
 * control block for recvmsg's msg_name.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}

/* Dispatch a complete L2CAP frame by destination CID.  Frames arriving
 * before the HCI link is fully connected are parked on conn->pending_rx
 * and replayed by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* Work item: replay frames queued while the HCI link was still coming up */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}

/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.  Takes a reference on hcon and allocates the HCI channel;
 * returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the outgoing MTU from the controller: LE MTU when set,
	 * otherwise fall back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}

/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte; BR/EDR PSMs must be odd with bit 0 of the upper byte clear.
 */
static bool is_valid_psm(u16 psm, u8 dst_type)
{
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}

/* Iterator context for l2cap_chan_by_pid() */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the scan (excluded) */
	struct pid *pid;		/* peer PID to match */
	int count;			/* matching deferred channels found */
};

/* l2cap_chan_list callback: count deferred EXT_FLOWCTL channels in
 * BT_CONNECT with the same peer PID and PSM as the reference channel.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}

/* Initiate an outgoing L2CAP channel to @dst/@dst_type on @psm or fixed
 * @cid: validate the request against the channel mode and state, create
 * or reuse the HCI connection (LE or ACL), attach the channel to the
 * L2CAP connection and kick off the connect procedure.
 *
 * Returns 0 on success (or if already connecting), -EISCONN if already
 * connected, or a negative errno on validation/connection failure.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we take the slave role and connect
		 * directly; otherwise connect via passive scanning.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);

/* Send an enhanced credit-based reconfigure request carrying the
 * channel's current MTU/MPS and its SCID.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}

/* Raise the channel's receive MTU (shrinking is rejected with -EINVAL)
 * and notify the peer via an ECRED reconfigure request.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}

/* ---- L2CAP interface with lower layer (HCI) ---- */

/* Incoming ACL connection indication: scan listening channels and build
 * the link-mode mask.  An exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference only if the channel is still alive */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}

/* HCI connect-complete callback: set up the L2CAP connection object,
 * spawn channels for all listening fixed-channel servers, and mark the
 * connection ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}

/* HCI disconnect indication: report the reason recorded on the L2CAP
 * connection, or remote-user-terminated if none exists.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

/* HCI disconnect-complete callback: tear down the L2CAP connection */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

/* React to an encryption change on a connection-oriented channel: when
 * encryption drops, give MEDIUM-security channels a grace timer and
 * close HIGH/FIPS ones immediately; when it comes up, cancel the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

/* HCI security (authentication/encryption) event callback: walk every
 * channel on the connection and advance or abort its connection setup
 * according to the new security state.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}

/* Accumulate fragments until the 2-byte L2CAP length field is complete,
 * then size (or resize) rx_skb to hold the full frame.  Returns the
 * number of bytes consumed or a negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}

/* Drop any partially assembled frame and reset the reassembly state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}

/* Entry point for ACL data from HCI: reassemble fragmented L2CAP frames
 * according to the ACL packet-boundary flags and feed complete frames to
 * l2cap_recv_frame().
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
7492 */ 7493 if (skb->len < L2CAP_LEN_SIZE) { 7494 l2cap_recv_frag(conn, skb, conn->mtu); 7495 break; 7496 } 7497 7498 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; 7499 7500 if (len == skb->len) { 7501 /* Complete frame received */ 7502 l2cap_recv_frame(conn, skb); 7503 return; 7504 } 7505 7506 BT_DBG("Start: total len %d, frag len %u", len, skb->len); 7507 7508 if (skb->len > len) { 7509 BT_ERR("Frame is too long (len %u, expected len %d)", 7510 skb->len, len); 7511 l2cap_conn_unreliable(conn, ECOMM); 7512 goto drop; 7513 } 7514 7515 /* Append fragment into frame (with header) */ 7516 if (l2cap_recv_frag(conn, skb, len) < 0) 7517 goto drop; 7518 7519 break; 7520 7521 case ACL_CONT: 7522 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len); 7523 7524 if (!conn->rx_skb) { 7525 BT_ERR("Unexpected continuation frame (len %d)", skb->len); 7526 l2cap_conn_unreliable(conn, ECOMM); 7527 goto drop; 7528 } 7529 7530 /* Complete the L2CAP length if it has not been read */ 7531 if (conn->rx_skb->len < L2CAP_LEN_SIZE) { 7532 if (l2cap_recv_len(conn, skb) < 0) { 7533 l2cap_conn_unreliable(conn, ECOMM); 7534 goto drop; 7535 } 7536 7537 /* Header still could not be read just continue */ 7538 if (conn->rx_skb->len < L2CAP_LEN_SIZE) 7539 break; 7540 } 7541 7542 if (skb->len > conn->rx_len) { 7543 BT_ERR("Fragment is too long (len %u, expected %u)", 7544 skb->len, conn->rx_len); 7545 l2cap_recv_reset(conn); 7546 l2cap_conn_unreliable(conn, ECOMM); 7547 goto drop; 7548 } 7549 7550 /* Append fragment into frame (with header) */ 7551 l2cap_recv_frag(conn, skb, skb->len); 7552 7553 if (!conn->rx_len) { 7554 /* Complete frame received. l2cap_recv_frame 7555 * takes ownership of the skb so set the global 7556 * rx_skb pointer to NULL first. 
7557 */ 7558 struct sk_buff *rx_skb = conn->rx_skb; 7559 conn->rx_skb = NULL; 7560 l2cap_recv_frame(conn, rx_skb); 7561 } 7562 break; 7563 } 7564 7565 drop: 7566 kfree_skb(skb); 7567 } 7568 7569 static struct hci_cb l2cap_cb = { 7570 .name = "L2CAP", 7571 .connect_cfm = l2cap_connect_cfm, 7572 .disconn_cfm = l2cap_disconn_cfm, 7573 .security_cfm = l2cap_security_cfm, 7574 }; 7575 7576 static int l2cap_debugfs_show(struct seq_file *f, void *p) 7577 { 7578 struct l2cap_chan *c; 7579 7580 read_lock(&chan_list_lock); 7581 7582 list_for_each_entry(c, &chan_list, global_l) { 7583 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 7584 &c->src, c->src_type, &c->dst, c->dst_type, 7585 c->state, __le16_to_cpu(c->psm), 7586 c->scid, c->dcid, c->imtu, c->omtu, 7587 c->sec_level, c->mode); 7588 } 7589 7590 read_unlock(&chan_list_lock); 7591 7592 return 0; 7593 } 7594 7595 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); 7596 7597 static struct dentry *l2cap_debugfs; 7598 7599 int __init l2cap_init(void) 7600 { 7601 int err; 7602 7603 err = l2cap_init_sockets(); 7604 if (err < 0) 7605 return err; 7606 7607 hci_register_cb(&l2cap_cb); 7608 7609 if (IS_ERR_OR_NULL(bt_debugfs)) 7610 return 0; 7611 7612 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, 7613 NULL, &l2cap_debugfs_fops); 7614 7615 return 0; 7616 } 7617 7618 void l2cap_exit(void) 7619 { 7620 debugfs_remove(l2cap_debugfs); 7621 hci_unregister_cb(&l2cap_cb); 7622 l2cap_cleanup_sockets(); 7623 } 7624 7625 module_param(disable_ertm, bool, 0644); 7626 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 7627 7628 module_param(enable_ecred, bool, 0644); 7629 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode"); 7630