1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 5 Copyright (C) 2010 Google Inc. 6 Copyright (C) 2011 ProFUSION Embedded Systems 7 Copyright (c) 2012 Code Aurora Forum. All rights reserved. 8 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 10 11 This program is free software; you can redistribute it and/or modify 12 it under the terms of the GNU General Public License version 2 as 13 published by the Free Software Foundation; 14 15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 23 24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 26 SOFTWARE IS DISCLAIMED. 27 */ 28 29 /* Bluetooth L2CAP core. 
*/

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

/* Map an HCI link type plus HCI LE address type to the corresponding
 * BDADDR_* socket address type.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* Socket address type of the local (source) end of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

/* Socket address type of the remote (destination) end of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}

/* ---- L2CAP channels ---- */

/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Look up a channel on @conn by the identifier of its outstanding
 * signalling request.  Caller must hold conn->chan_lock.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

/* Find channel with given signalling ident.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Look up a channel bound to @psm on local address @src in the global
 * channel list.  Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

/* Bind @chan to @psm on @src, or — when @psm is 0 — to the first free
 * odd PSM in the dynamic range 0x1001-0x10ff.
 * Returns 0 on success, -EADDRINUSE if @psm is taken, -EINVAL if no
 * dynamic PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport =
psm; 198 err = 0; 199 } else { 200 u16 p; 201 202 err = -EINVAL; 203 for (p = 0x1001; p < 0x1100; p += 2) 204 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) { 205 chan->psm = cpu_to_le16(p); 206 chan->sport = cpu_to_le16(p); 207 err = 0; 208 break; 209 } 210 } 211 212 done: 213 write_unlock(&chan_list_lock); 214 return err; 215 } 216 EXPORT_SYMBOL_GPL(l2cap_add_psm); 217 218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) 219 { 220 write_lock(&chan_list_lock); 221 222 /* Override the defaults (which are for conn-oriented) */ 223 chan->omtu = L2CAP_DEFAULT_MTU; 224 chan->chan_type = L2CAP_CHAN_FIXED; 225 226 chan->scid = scid; 227 228 write_unlock(&chan_list_lock); 229 230 return 0; 231 } 232 233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn) 234 { 235 u16 cid, dyn_end; 236 237 if (conn->hcon->type == LE_LINK) 238 dyn_end = L2CAP_CID_LE_DYN_END; 239 else 240 dyn_end = L2CAP_CID_DYN_END; 241 242 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) { 243 if (!__l2cap_get_chan_by_scid(conn, cid)) 244 return cid; 245 } 246 247 return 0; 248 } 249 250 static void l2cap_state_change(struct l2cap_chan *chan, int state) 251 { 252 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), 253 state_to_string(state)); 254 255 chan->state = state; 256 chan->ops->state_change(chan, state, 0); 257 } 258 259 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, 260 int state, int err) 261 { 262 chan->state = state; 263 chan->ops->state_change(chan, chan->state, err); 264 } 265 266 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) 267 { 268 chan->ops->state_change(chan, chan->state, err); 269 } 270 271 static void __set_retrans_timer(struct l2cap_chan *chan) 272 { 273 if (!delayed_work_pending(&chan->monitor_timer) && 274 chan->retrans_timeout) { 275 l2cap_set_timer(chan, &chan->retrans_timer, 276 msecs_to_jiffies(chan->retrans_timeout)); 277 } 278 } 279 280 static void __set_monitor_timer(struct l2cap_chan *chan) 
281 { 282 __clear_retrans_timer(chan); 283 if (chan->monitor_timeout) { 284 l2cap_set_timer(chan, &chan->monitor_timer, 285 msecs_to_jiffies(chan->monitor_timeout)); 286 } 287 } 288 289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, 290 u16 seq) 291 { 292 struct sk_buff *skb; 293 294 skb_queue_walk(head, skb) { 295 if (bt_cb(skb)->l2cap.txseq == seq) 296 return skb; 297 } 298 299 return NULL; 300 } 301 302 /* ---- L2CAP sequence number lists ---- */ 303 304 /* For ERTM, ordered lists of sequence numbers must be tracked for 305 * SREJ requests that are received and for frames that are to be 306 * retransmitted. These seq_list functions implement a singly-linked 307 * list in an array, where membership in the list can also be checked 308 * in constant time. Items can also be added to the tail of the list 309 * and removed from the head in constant time, without further memory 310 * allocs or frees. 311 */ 312 313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) 314 { 315 size_t alloc_size, i; 316 317 /* Allocated size is a power of 2 to map sequence numbers 318 * (which may be up to 14 bits) in to a smaller array that is 319 * sized for the negotiated ERTM transmit windows. 
320 */ 321 alloc_size = roundup_pow_of_two(size); 322 323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL); 324 if (!seq_list->list) 325 return -ENOMEM; 326 327 seq_list->mask = alloc_size - 1; 328 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 330 for (i = 0; i < alloc_size; i++) 331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; 332 333 return 0; 334 } 335 336 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) 337 { 338 kfree(seq_list->list); 339 } 340 341 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, 342 u16 seq) 343 { 344 /* Constant-time check for list membership */ 345 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; 346 } 347 348 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) 349 { 350 u16 seq = seq_list->head; 351 u16 mask = seq_list->mask; 352 353 seq_list->head = seq_list->list[seq & mask]; 354 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; 355 356 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { 357 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 359 } 360 361 return seq; 362 } 363 364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) 365 { 366 u16 i; 367 368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) 369 return; 370 371 for (i = 0; i <= seq_list->mask; i++) 372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; 373 374 seq_list->head = L2CAP_SEQ_LIST_CLEAR; 375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR; 376 } 377 378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) 379 { 380 u16 mask = seq_list->mask; 381 382 /* All appends happen in constant time */ 383 384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) 385 return; 386 387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) 388 seq_list->head = seq; 389 else 390 seq_list->list[seq_list->tail & mask] = seq; 391 392 seq_list->tail = seq; 393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; 394 } 395 396 
/* Channel timer expiry: close the channel.  The errno reported to the
 * owner depends on how far connection setup had progressed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): this put presumably pairs with a hold taken when
	 * the timer was armed (l2cap_set_timer) — confirm in l2cap.h.
	 */
	l2cap_chan_put(chan);
}

/* Allocate a new channel with refcount 1, link it into the global
 * channel list and leave it in BT_OPEN.  Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

/* kref release callback: unlink from the global list and free */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

/* Take a reference on @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

/* Drop a reference on @c; frees it when the last reference goes */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

/* Reset @chan's ERTM/security/timeout parameters to their defaults */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

/* Initialize the LE credit-based flow control state of @chan.
 * tx_credits stays 0 until the peer grants credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}

/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type and add it to the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values
		 */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}

/* Detach @chan from its connection, report @err to the owner and
 * release all per-mode queued data and timers.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing queued yet if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

/* Identity address update (LE): refresh the cached destination address
 * of every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* Reject a pending LE connection request on @chan and move it to
 * BT_DISCONN.  Result is "authorization pending" for deferred setup,
 * otherwise "bad PSM".
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu =
cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

/* Reject a pending BR/EDR connection request on @chan and move it to
 * BT_DISCONN.  Result is "security block" for deferred setup,
 * otherwise "bad PSM".
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

/* Close @chan for @reason, choosing the teardown path appropriate to
 * its current state (disconnect request, connect reject or immediate
 * delete).  Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

/* Map the channel type and security level to the HCI authentication
 * requirement used when securing the link.  SDP-bound channels are
 * special-cased to avoid forcing bonding for discovery traffic.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links use SMP instead of HCI authentication */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}

/* Build and transmit a signalling command on @conn's signalling
 * channel.  Silently drops the command if allocation fails.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

/* True while an AMP channel move is in progress on @chan */
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

/* Transmit @skb on @chan, routing it to the high-speed (AMP) logical
 * link when one is in use, otherwise to the ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

/* Decode a 16-bit enhanced ERTM control field into @control */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

/* Decode a 32-bit extended ERTM control field into @control */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

/* Pull the control field off @skb into its control block, using the
 * extended or enhanced layout depending on the channel flags.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

/* Encode @control into a 32-bit extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

/* Encode @control into a 16-bit enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

/* Write @control into @skb just after the L2CAP basic header */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

/* Size of the L2CAP header plus the control field for this channel */
static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

/* Build an ERTM S-frame PDU carrying @control, appending an FCS when
 * CRC16 checking is negotiated.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

/* Send the supervisory frame described by @control, updating the
 * F-bit/RNR bookkeeping and the last acknowledged sequence number.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Don't send frames in the middle of an AMP channel move */
	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
control_field = __pack_extended_control(control); 1091 else 1092 control_field = __pack_enhanced_control(control); 1093 1094 skb = l2cap_create_sframe_pdu(chan, control_field); 1095 if (!IS_ERR(skb)) 1096 l2cap_do_send(chan, skb); 1097 } 1098 1099 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) 1100 { 1101 struct l2cap_ctrl control; 1102 1103 BT_DBG("chan %p, poll %d", chan, poll); 1104 1105 memset(&control, 0, sizeof(control)); 1106 control.sframe = 1; 1107 control.poll = poll; 1108 1109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) 1110 control.super = L2CAP_SUPER_RNR; 1111 else 1112 control.super = L2CAP_SUPER_RR; 1113 1114 control.reqseq = chan->buffer_seq; 1115 l2cap_send_sframe(chan, &control); 1116 } 1117 1118 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 1119 { 1120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) 1121 return true; 1122 1123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); 1124 } 1125 1126 static bool __amp_capable(struct l2cap_chan *chan) 1127 { 1128 struct l2cap_conn *conn = chan->conn; 1129 struct hci_dev *hdev; 1130 bool amp_available = false; 1131 1132 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) 1133 return false; 1134 1135 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP)) 1136 return false; 1137 1138 read_lock(&hci_dev_list_lock); 1139 list_for_each_entry(hdev, &hci_dev_list, list) { 1140 if (hdev->amp_type != AMP_TYPE_BREDR && 1141 test_bit(HCI_UP, &hdev->flags)) { 1142 amp_available = true; 1143 break; 1144 } 1145 } 1146 read_unlock(&hci_dev_list_lock); 1147 1148 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED) 1149 return amp_available; 1150 1151 return false; 1152 } 1153 1154 static bool l2cap_check_efs(struct l2cap_chan *chan) 1155 { 1156 /* Check EFS parameters */ 1157 return true; 1158 } 1159 1160 void l2cap_send_conn_req(struct l2cap_chan *chan) 1161 { 1162 struct l2cap_conn *conn = chan->conn; 1163 struct l2cap_conn_req req; 1164 1165 req.scid = 
cpu_to_le16(chan->scid); 1166 req.psm = chan->psm; 1167 1168 chan->ident = l2cap_get_ident(conn); 1169 1170 set_bit(CONF_CONNECT_PEND, &chan->conf_state); 1171 1172 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); 1173 } 1174 1175 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id) 1176 { 1177 struct l2cap_create_chan_req req; 1178 req.scid = cpu_to_le16(chan->scid); 1179 req.psm = chan->psm; 1180 req.amp_id = amp_id; 1181 1182 chan->ident = l2cap_get_ident(chan->conn); 1183 1184 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ, 1185 sizeof(req), &req); 1186 } 1187 1188 static void l2cap_move_setup(struct l2cap_chan *chan) 1189 { 1190 struct sk_buff *skb; 1191 1192 BT_DBG("chan %p", chan); 1193 1194 if (chan->mode != L2CAP_MODE_ERTM) 1195 return; 1196 1197 __clear_retrans_timer(chan); 1198 __clear_monitor_timer(chan); 1199 __clear_ack_timer(chan); 1200 1201 chan->retry_count = 0; 1202 skb_queue_walk(&chan->tx_q, skb) { 1203 if (bt_cb(skb)->l2cap.retries) 1204 bt_cb(skb)->l2cap.retries = 1; 1205 else 1206 break; 1207 } 1208 1209 chan->expected_tx_seq = chan->buffer_seq; 1210 1211 clear_bit(CONN_REJ_ACT, &chan->conn_state); 1212 clear_bit(CONN_SREJ_ACT, &chan->conn_state); 1213 l2cap_seq_list_clear(&chan->retrans_list); 1214 l2cap_seq_list_clear(&chan->srej_list); 1215 skb_queue_purge(&chan->srej_q); 1216 1217 chan->tx_state = L2CAP_TX_STATE_XMIT; 1218 chan->rx_state = L2CAP_RX_STATE_MOVE; 1219 1220 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 1221 } 1222 1223 static void l2cap_move_done(struct l2cap_chan *chan) 1224 { 1225 u8 move_role = chan->move_role; 1226 BT_DBG("chan %p", chan); 1227 1228 chan->move_state = L2CAP_MOVE_STABLE; 1229 chan->move_role = L2CAP_MOVE_ROLE_NONE; 1230 1231 if (chan->mode != L2CAP_MODE_ERTM) 1232 return; 1233 1234 switch (move_role) { 1235 case L2CAP_MOVE_ROLE_INITIATOR: 1236 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL); 1237 chan->rx_state = L2CAP_RX_STATE_WAIT_F; 1238 
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

/* Transition the channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* No credits yet on an LE flow-control channel: the owner must
	 * not send until credits arrive.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

/* Send an LE Credit Based Connection Request, at most once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

/* Start an LE channel once security permits.  Fixed channels (no PSM)
 * become ready immediately; PSM-based channels in BT_CONNECT issue an
 * LE connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}

/* Kick off channel establishment via AMP discovery, LE connect or a
 * plain BR/EDR Connection Request, depending on transport.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

/* Issue an Information Request for the feature mask, once per
 * connection, and arm the info timeout.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}

/* Start channel establishment.  On BR/EDR the feature-mask info
 * exchange must complete first; on LE we go straight to l2cap_le_start.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		/* The info response (or timeout) restarts pending
		 * channels via l2cap_conn_start().
		 */
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}

/* Check whether the given retransmission mode is supported both locally
 * (unless ERTM is disabled via module parameter) and by the remote
 * feature mask.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

/* Send a Disconnection Request for the channel and move it to
 * BT_DISCONN with the given error.  A2MP channels have no signalling
 * disconnect; they just change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}
	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}

/* ---- L2CAP connections ---- */

/* Walk all channels on the connection and push their establishment
 * forward: connect channels in BT_CONNECT, answer incoming requests
 * parked in BT_CONNECT2.  Called once the info exchange is done.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to another
			 * mode, so close if the required mode is
			 * unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

/* LE link is up: trigger pending security and, as slave, request a
 * connection parameter update if the current interval is out of the
 * configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}

/* The underlying link is ready: start or complete every channel on the
 * connection and schedule processing of any queued RX frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are managed by the AMP manager */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliability are told */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
	mutex_unlock(&conn->chan_lock);
}

/* Info request timed out: pretend the exchange completed so pending
 * channels can proceed with default features.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}

/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

/* Register an external user on the connection.  Returns -EINVAL if the
 * user is already registered, -ENODEV if the connection has been torn
 * down, or the ->probe() callback's error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

/* Unregister a previously registered user; silently ignores users that
 * are not on the list (e.g. already removed by l2cap_conn_del()).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);

/* Remove every registered user, invoking their ->remove callbacks.
 * Caller provides locking (see l2cap_register_user comment).
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}

/* Tear down the L2CAP connection: cancel pending work, remove users,
 * kill every channel with the given error and drop the hci_chan.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* ->close may sleep/take other locks, so it runs outside
		 * the channel lock.
		 */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}

/* kref release: drop the hci_conn reference and free the connection */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);

/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.  An exact src+dst match wins; otherwise the
 * last channel matching via BDADDR_ANY wildcards is returned.  The
 * returned channel is held; caller must put it.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}

/* ERTM monitor timer: feed a MONITOR_TO event into the tx state
 * machine, unless the channel lost its connection meanwhile.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* ERTM retransmission timer: feed a RETRANS_TO event into the tx state
 * machine, unless the channel lost its connection meanwhile.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Streaming mode transmit: append the segments to tx_q and send them
 * all immediately, stamping sequence numbers and (optionally) FCS.
 * There is no retransmission in streaming mode.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Transmission is paused during an AMP channel move */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}

/* ERTM transmit: send queued I-frames while the remote tx window has
 * room.  Each frame acknowledges up to buffer_seq, and a clone is
 * transmitted so the original stays available for retransmission.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}

/* Retransmit every sequence number currently in retrans_list, patching
 * the control field and FCS in place.  Disconnects the channel if a
 * frame exceeds max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

/* Retransmit the single frame the peer REJ/SREJed. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

/* Retransmit all unacked frames starting at control->reqseq (REJ
 * recovery).  A poll bit requests a final bit on the first resend.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acked */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;
			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

/* Acknowledge received I-frames.  Sends RNR while locally busy; else
 * tries to piggyback the ack on pending I-frames, sends an explicit RR
 * once the window is three-quarters full, and otherwise (re)arms the
 * ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}

/* Copy user data from msg into skb; overflow beyond the first 'count'
 * bytes is placed in continuation fragments (no L2CAP header), each at
 * most conn->mtu bytes.  Returns bytes copied or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload. */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build an ERTM/streaming I-frame PDU.  Reserves space for the control
 * field (filled in later at transmit time), optional SDU length (only
 * on the first segment of a segmented SDU) and optional FCS.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}

/* Segment an SDU into ERTM/streaming PDUs queued on seg_queue, setting
 * the SAR field (unsegmented, start, continue, end) on each.  Returns 0
 * or a negative error (on error the queue is purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* SDU length is only carried in the first segment */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}

/* Build an LE flow-control (K-frame) PDU; the optional SDU length is
 * only present on the first segment.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
2379 } 2380 2381 static int l2cap_segment_le_sdu(struct l2cap_chan *chan, 2382 struct sk_buff_head *seg_queue, 2383 struct msghdr *msg, size_t len) 2384 { 2385 struct sk_buff *skb; 2386 size_t pdu_len; 2387 u16 sdu_len; 2388 2389 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); 2390 2391 sdu_len = len; 2392 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; 2393 2394 while (len > 0) { 2395 if (len <= pdu_len) 2396 pdu_len = len; 2397 2398 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); 2399 if (IS_ERR(skb)) { 2400 __skb_queue_purge(seg_queue); 2401 return PTR_ERR(skb); 2402 } 2403 2404 __skb_queue_tail(seg_queue, skb); 2405 2406 len -= pdu_len; 2407 2408 if (sdu_len) { 2409 sdu_len = 0; 2410 pdu_len += L2CAP_SDULEN_SIZE; 2411 } 2412 } 2413 2414 return 0; 2415 } 2416 2417 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 2418 { 2419 struct sk_buff *skb; 2420 int err; 2421 struct sk_buff_head seg_queue; 2422 2423 if (!chan->conn) 2424 return -ENOTCONN; 2425 2426 /* Connectionless channel */ 2427 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2428 skb = l2cap_create_connless_pdu(chan, msg, len); 2429 if (IS_ERR(skb)) 2430 return PTR_ERR(skb); 2431 2432 /* Channel lock is released before requesting new skb and then 2433 * reacquired thus we need to recheck channel state. 
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits left: caller must wait until the remote
		 * grants more before sending.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (allocation
		 * may drop the channel lock) -- recheck before queuing.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as we have credits for */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: tell the socket layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);

/* Send an SREJ for every missing frame between expected_tx_seq and
 * txseq that is not already buffered in srej_q, recording each
 * requested sequence number in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}

/* Re-send the SREJ for the most recently requested (tail) sequence
 * number, if any request is outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}

/* Re-send SREJs for all outstanding requests up to (but excluding)
 * txseq, keeping them queued for a possible further pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl
	control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Re-append: this frame is still outstanding */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}

/* Release every I-frame acknowledged by reqseq from the transmit
 * queue and advance expected_ack_seq accordingly.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or no new frames acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: the retransmission timer can be stopped */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}

/* Abandon an in-progress SREJ recovery and return the receiver to
 * normal reception.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}

/* ERTM transmit state machine, XMIT state: normal transmission. */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* RNR was sent while busy; poll the remote with
			 * an RR so it resumes sending to us.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}

/* ERTM transmit state machine, WAIT_F state: a poll (P=1) frame was
 * sent; transmission is paused until a frame with the final (F) bit
 * arrives.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event) 2732 { 2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, 2734 event); 2735 2736 switch (event) { 2737 case L2CAP_EV_DATA_REQUEST: 2738 if (chan->tx_send_head == NULL) 2739 chan->tx_send_head = skb_peek(skbs); 2740 /* Queue data, but don't send. */ 2741 skb_queue_splice_tail_init(skbs, &chan->tx_q); 2742 break; 2743 case L2CAP_EV_LOCAL_BUSY_DETECTED: 2744 BT_DBG("Enter LOCAL_BUSY"); 2745 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2746 2747 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 2748 /* The SREJ_SENT state must be aborted if we are to 2749 * enter the LOCAL_BUSY state. 2750 */ 2751 l2cap_abort_rx_srej_sent(chan); 2752 } 2753 2754 l2cap_send_ack(chan); 2755 2756 break; 2757 case L2CAP_EV_LOCAL_BUSY_CLEAR: 2758 BT_DBG("Exit LOCAL_BUSY"); 2759 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2760 2761 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { 2762 struct l2cap_ctrl local_control; 2763 memset(&local_control, 0, sizeof(local_control)); 2764 local_control.sframe = 1; 2765 local_control.super = L2CAP_SUPER_RR; 2766 local_control.poll = 1; 2767 local_control.reqseq = chan->buffer_seq; 2768 l2cap_send_sframe(chan, &local_control); 2769 2770 chan->retry_count = 1; 2771 __set_monitor_timer(chan); 2772 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2773 } 2774 break; 2775 case L2CAP_EV_RECV_REQSEQ_AND_FBIT: 2776 l2cap_process_reqseq(chan, control->reqseq); 2777 2778 /* Fall through */ 2779 2780 case L2CAP_EV_RECV_FBIT: 2781 if (control && control->final) { 2782 __clear_monitor_timer(chan); 2783 if (chan->unacked_frames > 0) 2784 __set_retrans_timer(chan); 2785 chan->retry_count = 0; 2786 chan->tx_state = L2CAP_TX_STATE_XMIT; 2787 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); 2788 } 2789 break; 2790 case L2CAP_EV_EXPLICIT_POLL: 2791 /* Ignore */ 2792 break; 2793 case L2CAP_EV_MONITOR_TO: 2794 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { 2795 l2cap_send_rr_or_rnr(chan, 1); 2796 
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}

/* Dispatch a TX event to the handler for the current ERTM TX state. */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}

/* Feed the reqseq and F bit of a received frame into the TX state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

/* As above, but only the F bit of the frame is relevant. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv taking ownership failed: drop our clone */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb (L2CAP header + command header +
 * payload); payload beyond the ACL MTU goes into continuation
 * fragments chained on frag_list.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn
				       *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID that differs between BR/EDR and LE */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees any continuation fragments already chained */
	kfree_skb(skb);
	return NULL;
}

/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * NOTE(review): opt->len comes from the peer and is not checked
 * against the remaining buffer length here, so *ptr can advance past
 * the end of the option data; callers bound their loop only with
 * "len >= L2CAP_CONF_OPT_SIZE".  Upstream later hardened this path --
 * confirm this tree carries that fix.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;
	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the
		 * raw bytes instead of a decoded integer.
		 */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

/* Append one configuration option at *ptr and advance *ptr past it.
 * For len > 4, val is interpreted as a pointer to the option payload.
 *
 * NOTE(review): no output-buffer bound is enforced here; every caller
 * must size its buffer for the worst case (cf. the upstream BlueBorne
 * fix that added an explicit end pointer to this helper).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

/* Append an Extended Flow Specification option describing this
 * channel; only meaningful for ERTM and Streaming modes.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}

/* Delayed work: send an RR acknowledging I-frames received since the
 * last ack, then drop the channel reference taken when the ack timer
 * was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p",
	       chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Reset ERTM/streaming transmit and receive state for a freshly
 * configured channel.  For ERTM, additionally arm the delayed work
 * items and allocate the SREJ and retransmission sequence lists;
 * returns a negative errno if either allocation fails.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}

/* Pick the requested mode if the remote advertised support for it,
 * otherwise fall back to Basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

/* Extended window sizes require local A2MP support and the remote's
 * extended-window feature bit.
 */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}

/* Extended Flow Specification requires local A2MP support and the
 * remote's extended-flow feature bit.
 */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}

/* Fill in the RFC option's retransmission and monitor timeouts.
 * On an AMP link they are derived from the controller's best-effort
 * flush timeout; otherwise the BR/EDR defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
3144 */ 3145 ertm_to = 3 * ertm_to + 500; 3146 3147 if (ertm_to > 0xffff) 3148 ertm_to = 0xffff; 3149 3150 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to); 3151 rfc->monitor_timeout = rfc->retrans_timeout; 3152 } else { 3153 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); 3154 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); 3155 } 3156 } 3157 3158 static inline void l2cap_txwin_setup(struct l2cap_chan *chan) 3159 { 3160 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && 3161 __l2cap_ews_supported(chan->conn)) { 3162 /* use extended control field */ 3163 set_bit(FLAG_EXT_CTRL, &chan->flags); 3164 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3165 } else { 3166 chan->tx_win = min_t(u16, chan->tx_win, 3167 L2CAP_DEFAULT_TX_WINDOW); 3168 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 3169 } 3170 chan->ack_win = chan->tx_win; 3171 } 3172 3173 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) 3174 { 3175 struct l2cap_conf_req *req = data; 3176 struct l2cap_conf_rfc rfc = { .mode = chan->mode }; 3177 void *ptr = req->data; 3178 u16 size; 3179 3180 BT_DBG("chan %p", chan); 3181 3182 if (chan->num_conf_req || chan->num_conf_rsp) 3183 goto done; 3184 3185 switch (chan->mode) { 3186 case L2CAP_MODE_STREAMING: 3187 case L2CAP_MODE_ERTM: 3188 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) 3189 break; 3190 3191 if (__l2cap_efs_supported(chan->conn)) 3192 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3193 3194 /* fall through */ 3195 default: 3196 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); 3197 break; 3198 } 3199 3200 done: 3201 if (chan->imtu != L2CAP_DEFAULT_MTU) 3202 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu); 3203 3204 switch (chan->mode) { 3205 case L2CAP_MODE_BASIC: 3206 if (disable_ertm) 3207 break; 3208 3209 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3210 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3211 break; 3212 3213 rfc.mode = L2CAP_MODE_BASIC; 3214 rfc.txwin_size = 0; 3215 
rfc.max_transmit = 0; 3216 rfc.retrans_timeout = 0; 3217 rfc.monitor_timeout = 0; 3218 rfc.max_pdu_size = 0; 3219 3220 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3221 (unsigned long) &rfc); 3222 break; 3223 3224 case L2CAP_MODE_ERTM: 3225 rfc.mode = L2CAP_MODE_ERTM; 3226 rfc.max_transmit = chan->max_tx; 3227 3228 __l2cap_set_ertm_timeouts(chan, &rfc); 3229 3230 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3231 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3232 L2CAP_FCS_SIZE); 3233 rfc.max_pdu_size = cpu_to_le16(size); 3234 3235 l2cap_txwin_setup(chan); 3236 3237 rfc.txwin_size = min_t(u16, chan->tx_win, 3238 L2CAP_DEFAULT_TX_WINDOW); 3239 3240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3241 (unsigned long) &rfc); 3242 3243 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3244 l2cap_add_opt_efs(&ptr, chan); 3245 3246 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3248 chan->tx_win); 3249 3250 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3251 if (chan->fcs == L2CAP_FCS_NONE || 3252 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3253 chan->fcs = L2CAP_FCS_NONE; 3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3255 chan->fcs); 3256 } 3257 break; 3258 3259 case L2CAP_MODE_STREAMING: 3260 l2cap_txwin_setup(chan); 3261 rfc.mode = L2CAP_MODE_STREAMING; 3262 rfc.txwin_size = 0; 3263 rfc.max_transmit = 0; 3264 rfc.retrans_timeout = 0; 3265 rfc.monitor_timeout = 0; 3266 3267 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3268 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3269 L2CAP_FCS_SIZE); 3270 rfc.max_pdu_size = cpu_to_le16(size); 3271 3272 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3273 (unsigned long) &rfc); 3274 3275 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3276 l2cap_add_opt_efs(&ptr, chan); 3277 3278 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3279 if (chan->fcs == L2CAP_FCS_NONE || 3280 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3281 chan->fcs = 
L2CAP_FCS_NONE; 3282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3283 chan->fcs); 3284 } 3285 break; 3286 } 3287 3288 req->dcid = cpu_to_le16(chan->dcid); 3289 req->flags = cpu_to_le16(0); 3290 3291 return ptr - data; 3292 } 3293 3294 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) 3295 { 3296 struct l2cap_conf_rsp *rsp = data; 3297 void *ptr = rsp->data; 3298 void *req = chan->conf_req; 3299 int len = chan->conf_len; 3300 int type, hint, olen; 3301 unsigned long val; 3302 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 3303 struct l2cap_conf_efs efs; 3304 u8 remote_efs = 0; 3305 u16 mtu = L2CAP_DEFAULT_MTU; 3306 u16 result = L2CAP_CONF_SUCCESS; 3307 u16 size; 3308 3309 BT_DBG("chan %p", chan); 3310 3311 while (len >= L2CAP_CONF_OPT_SIZE) { 3312 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 3313 3314 hint = type & L2CAP_CONF_HINT; 3315 type &= L2CAP_CONF_MASK; 3316 3317 switch (type) { 3318 case L2CAP_CONF_MTU: 3319 mtu = val; 3320 break; 3321 3322 case L2CAP_CONF_FLUSH_TO: 3323 chan->flush_to = val; 3324 break; 3325 3326 case L2CAP_CONF_QOS: 3327 break; 3328 3329 case L2CAP_CONF_RFC: 3330 if (olen == sizeof(rfc)) 3331 memcpy(&rfc, (void *) val, olen); 3332 break; 3333 3334 case L2CAP_CONF_FCS: 3335 if (val == L2CAP_FCS_NONE) 3336 set_bit(CONF_RECV_NO_FCS, &chan->conf_state); 3337 break; 3338 3339 case L2CAP_CONF_EFS: 3340 remote_efs = 1; 3341 if (olen == sizeof(efs)) 3342 memcpy(&efs, (void *) val, olen); 3343 break; 3344 3345 case L2CAP_CONF_EWS: 3346 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) 3347 return -ECONNREFUSED; 3348 3349 set_bit(FLAG_EXT_CTRL, &chan->flags); 3350 set_bit(CONF_EWS_RECV, &chan->conf_state); 3351 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3352 chan->remote_tx_win = val; 3353 break; 3354 3355 default: 3356 if (hint) 3357 break; 3358 3359 result = L2CAP_CONF_UNKNOWN; 3360 *((u8 *) ptr++) = type; 3361 break; 3362 } 3363 } 3364 3365 if (chan->num_conf_rsp || chan->num_conf_req > 1) 3366 goto 
done; 3367 3368 switch (chan->mode) { 3369 case L2CAP_MODE_STREAMING: 3370 case L2CAP_MODE_ERTM: 3371 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 3372 chan->mode = l2cap_select_mode(rfc.mode, 3373 chan->conn->feat_mask); 3374 break; 3375 } 3376 3377 if (remote_efs) { 3378 if (__l2cap_efs_supported(chan->conn)) 3379 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3380 else 3381 return -ECONNREFUSED; 3382 } 3383 3384 if (chan->mode != rfc.mode) 3385 return -ECONNREFUSED; 3386 3387 break; 3388 } 3389 3390 done: 3391 if (chan->mode != rfc.mode) { 3392 result = L2CAP_CONF_UNACCEPT; 3393 rfc.mode = chan->mode; 3394 3395 if (chan->num_conf_rsp == 1) 3396 return -ECONNREFUSED; 3397 3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3399 (unsigned long) &rfc); 3400 } 3401 3402 if (result == L2CAP_CONF_SUCCESS) { 3403 /* Configure output options and let the other side know 3404 * which ones we don't like. */ 3405 3406 if (mtu < L2CAP_DEFAULT_MIN_MTU) 3407 result = L2CAP_CONF_UNACCEPT; 3408 else { 3409 chan->omtu = mtu; 3410 set_bit(CONF_MTU_DONE, &chan->conf_state); 3411 } 3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 3413 3414 if (remote_efs) { 3415 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3416 efs.stype != L2CAP_SERV_NOTRAFIC && 3417 efs.stype != chan->local_stype) { 3418 3419 result = L2CAP_CONF_UNACCEPT; 3420 3421 if (chan->num_conf_req >= 1) 3422 return -ECONNREFUSED; 3423 3424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3425 sizeof(efs), 3426 (unsigned long) &efs); 3427 } else { 3428 /* Send PENDING Conf Rsp */ 3429 result = L2CAP_CONF_PENDING; 3430 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3431 } 3432 } 3433 3434 switch (rfc.mode) { 3435 case L2CAP_MODE_BASIC: 3436 chan->fcs = L2CAP_FCS_NONE; 3437 set_bit(CONF_MODE_DONE, &chan->conf_state); 3438 break; 3439 3440 case L2CAP_MODE_ERTM: 3441 if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) 3442 chan->remote_tx_win = rfc.txwin_size; 3443 else 3444 rfc.txwin_size = 
L2CAP_DEFAULT_TX_WINDOW; 3445 3446 chan->remote_max_tx = rfc.max_transmit; 3447 3448 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3449 chan->conn->mtu - L2CAP_EXT_HDR_SIZE - 3450 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); 3451 rfc.max_pdu_size = cpu_to_le16(size); 3452 chan->remote_mps = size; 3453 3454 __l2cap_set_ertm_timeouts(chan, &rfc); 3455 3456 set_bit(CONF_MODE_DONE, &chan->conf_state); 3457 3458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3459 sizeof(rfc), (unsigned long) &rfc); 3460 3461 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3462 chan->remote_id = efs.id; 3463 chan->remote_stype = efs.stype; 3464 chan->remote_msdu = le16_to_cpu(efs.msdu); 3465 chan->remote_flush_to = 3466 le32_to_cpu(efs.flush_to); 3467 chan->remote_acc_lat = 3468 le32_to_cpu(efs.acc_lat); 3469 chan->remote_sdu_itime = 3470 le32_to_cpu(efs.sdu_itime); 3471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3472 sizeof(efs), 3473 (unsigned long) &efs); 3474 } 3475 break; 3476 3477 case L2CAP_MODE_STREAMING: 3478 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3479 chan->conn->mtu - L2CAP_EXT_HDR_SIZE - 3480 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); 3481 rfc.max_pdu_size = cpu_to_le16(size); 3482 chan->remote_mps = size; 3483 3484 set_bit(CONF_MODE_DONE, &chan->conf_state); 3485 3486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3487 (unsigned long) &rfc); 3488 3489 break; 3490 3491 default: 3492 result = L2CAP_CONF_UNACCEPT; 3493 3494 memset(&rfc, 0, sizeof(rfc)); 3495 rfc.mode = chan->mode; 3496 } 3497 3498 if (result == L2CAP_CONF_SUCCESS) 3499 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 3500 } 3501 rsp->scid = cpu_to_le16(chan->dcid); 3502 rsp->result = cpu_to_le16(result); 3503 rsp->flags = cpu_to_le16(0); 3504 3505 return ptr - data; 3506 } 3507 3508 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, 3509 void *data, u16 *result) 3510 { 3511 struct l2cap_conf_req *req = data; 3512 void *ptr = req->data; 3513 int type, olen; 3514 unsigned long val; 3515 struct 
       l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the options the peer sent in its Configure Response and
	 * echo back (possibly adjusted) values into the new request.
	 *
	 * NOTE(review): the options written into the caller's buffer
	 * via l2cap_add_conf_opt() are not bounded by the buffer size
	 * here -- this is the code path hardened by the BlueBorne
	 * (CVE-2017-1000251) fix upstream; verify this tree carries it.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless no-traffic */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* The peer may not move us away from Basic mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
case L2CAP_MODE_ERTM: 3589 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3590 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3591 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3592 if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) 3593 chan->ack_win = min_t(u16, chan->ack_win, 3594 rfc.txwin_size); 3595 3596 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3597 chan->local_msdu = le16_to_cpu(efs.msdu); 3598 chan->local_sdu_itime = 3599 le32_to_cpu(efs.sdu_itime); 3600 chan->local_acc_lat = le32_to_cpu(efs.acc_lat); 3601 chan->local_flush_to = 3602 le32_to_cpu(efs.flush_to); 3603 } 3604 break; 3605 3606 case L2CAP_MODE_STREAMING: 3607 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3608 } 3609 } 3610 3611 req->dcid = cpu_to_le16(chan->dcid); 3612 req->flags = cpu_to_le16(0); 3613 3614 return ptr - data; 3615 } 3616 3617 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, 3618 u16 result, u16 flags) 3619 { 3620 struct l2cap_conf_rsp *rsp = data; 3621 void *ptr = rsp->data; 3622 3623 BT_DBG("chan %p", chan); 3624 3625 rsp->scid = cpu_to_le16(chan->dcid); 3626 rsp->result = cpu_to_le16(result); 3627 rsp->flags = cpu_to_le16(flags); 3628 3629 return ptr - data; 3630 } 3631 3632 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) 3633 { 3634 struct l2cap_le_conn_rsp rsp; 3635 struct l2cap_conn *conn = chan->conn; 3636 3637 BT_DBG("chan %p", chan); 3638 3639 rsp.dcid = cpu_to_le16(chan->scid); 3640 rsp.mtu = cpu_to_le16(chan->imtu); 3641 rsp.mps = cpu_to_le16(chan->mps); 3642 rsp.credits = cpu_to_le16(chan->rx_credits); 3643 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3644 3645 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), 3646 &rsp); 3647 } 3648 3649 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) 3650 { 3651 struct l2cap_conn_rsp rsp; 3652 struct l2cap_conn *conn = chan->conn; 3653 u8 buf[128]; 3654 u8 rsp_code; 3655 3656 rsp.scid = cpu_to_le16(chan->dcid); 3657 rsp.dcid = cpu_to_le16(chan->scid); 3658 
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3659 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3660 3661 if (chan->hs_hcon) 3662 rsp_code = L2CAP_CREATE_CHAN_RSP; 3663 else 3664 rsp_code = L2CAP_CONN_RSP; 3665 3666 BT_DBG("chan %p rsp_code %u", chan, rsp_code); 3667 3668 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp); 3669 3670 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) 3671 return; 3672 3673 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3674 l2cap_build_conf_req(chan, buf), buf); 3675 chan->num_conf_req++; 3676 } 3677 3678 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) 3679 { 3680 int type, olen; 3681 unsigned long val; 3682 /* Use sane default values in case a misbehaving remote device 3683 * did not send an RFC or extended window size option. 3684 */ 3685 u16 txwin_ext = chan->ack_win; 3686 struct l2cap_conf_rfc rfc = { 3687 .mode = chan->mode, 3688 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO), 3689 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO), 3690 .max_pdu_size = cpu_to_le16(chan->imtu), 3691 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW), 3692 }; 3693 3694 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); 3695 3696 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING)) 3697 return; 3698 3699 while (len >= L2CAP_CONF_OPT_SIZE) { 3700 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 3701 3702 switch (type) { 3703 case L2CAP_CONF_RFC: 3704 if (olen == sizeof(rfc)) 3705 memcpy(&rfc, (void *)val, olen); 3706 break; 3707 case L2CAP_CONF_EWS: 3708 txwin_ext = val; 3709 break; 3710 } 3711 } 3712 3713 switch (rfc.mode) { 3714 case L2CAP_MODE_ERTM: 3715 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3716 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3717 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3718 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3719 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext); 3720 
else 3721 chan->ack_win = min_t(u16, chan->ack_win, 3722 rfc.txwin_size); 3723 break; 3724 case L2CAP_MODE_STREAMING: 3725 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3726 } 3727 } 3728 3729 static inline int l2cap_command_rej(struct l2cap_conn *conn, 3730 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 3731 u8 *data) 3732 { 3733 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; 3734 3735 if (cmd_len < sizeof(*rej)) 3736 return -EPROTO; 3737 3738 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) 3739 return 0; 3740 3741 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && 3742 cmd->ident == conn->info_ident) { 3743 cancel_delayed_work(&conn->info_timer); 3744 3745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3746 conn->info_ident = 0; 3747 3748 l2cap_conn_start(conn); 3749 } 3750 3751 return 0; 3752 } 3753 3754 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, 3755 struct l2cap_cmd_hdr *cmd, 3756 u8 *data, u8 rsp_code, u8 amp_id) 3757 { 3758 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 3759 struct l2cap_conn_rsp rsp; 3760 struct l2cap_chan *chan = NULL, *pchan; 3761 int result, status = L2CAP_CS_NO_INFO; 3762 3763 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 3764 __le16 psm = req->psm; 3765 3766 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid); 3767 3768 /* Check if we have socket listening on psm */ 3769 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 3770 &conn->hcon->dst, ACL_LINK); 3771 if (!pchan) { 3772 result = L2CAP_CR_BAD_PSM; 3773 goto sendresp; 3774 } 3775 3776 mutex_lock(&conn->chan_lock); 3777 l2cap_chan_lock(pchan); 3778 3779 /* Check if the ACL is secure enough (if not SDP) */ 3780 if (psm != cpu_to_le16(L2CAP_PSM_SDP) && 3781 !hci_conn_check_link_mode(conn->hcon)) { 3782 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3783 result = L2CAP_CR_SEC_BLOCK; 3784 goto response; 3785 } 3786 3787 result = L2CAP_CR_NO_MEM; 3788 3789 /* Check if we already have channel with that dcid */ 
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	/* Let the protocol user (socket layer etc.) allocate the channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our local CID becomes the remote's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* User space must authorize first */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Pending with "no info": start the feature mask exchange now */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediate success: begin configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}

/* Signaling handler for L2CAP Connection Request: notify the management
 * interface of the incoming connection and delegate to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}

/* Handle a Connection Response (or AMP Create Channel Response). On
 * success the channel moves to BT_CONFIG and configuration starts; any
 * other result deletes the channel.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending (scid == 0) response is matched by command ident */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

/* Send the (possibly deferred) successful Configure Response for an
 * EFS-enabled channel and mark our outgoing configuration as done.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}

/* Reply to a command that referenced an unknown CID with a
 * Command Reject (reason: invalid CID).
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

/* Handle an incoming Configure Request. Options may arrive split over
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req and parsed once the final fragment arrives.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer.
 */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}

/* Handle an incoming Configure Response. Success stores the negotiated
 * RFC values; PENDING/UNACCEPT re-negotiate; anything else tears the
 * channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: confirm after the logical link is up */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough: too many UNACCEPT rounds -> give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}

/* Handle an incoming Disconnection Request: acknowledge it and tear the
 * channel down (ops->close is called after dropping the channel lock;
 * the extra hold keeps the channel alive across that window).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}

/* Handle an incoming Disconnection Response: the remote confirmed our
 * disconnect, so the channel is deleted without an error code.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold keeps the channel valid for ops->close after unlock */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}

/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries, reject anything else as not supported.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 8 bytes: type (2) + result (2) + feature mask (4) */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 12 bytes: type (2) + result (2) + 8-byte channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}

/* Handle an incoming Information Response: record the remote's feature
 * mask / fixed channels and resume pending connection setups.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query if supported */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}

/* Handle an AMP Create Channel Request. Controller id 0 falls back to a
 * plain BR/EDR connect; otherwise the AMP controller id is validated and
 * the new channel is bound to the high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm,
	       scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is handled by the AMP controller */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}

/* Send a Move Channel Request toward dest_amp_id and arm the move
 * timeout; the allocated ident is remembered to match the response.
 */
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

/* Send a Move Channel Response with the given result code. */
static void l2cap_send_move_chan_rsp(struct
				     l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}

/* Send a Move Channel Confirmation and arm the move timeout while we
 * wait for the confirmation response.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

/* Send an "unconfirmed" Move Channel Confirmation for a bare ICID
 * (used when no matching channel exists).
 */
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}

/* Acknowledge a Move Channel Confirmation with its response. */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}

/* Drop the channel's references to the high-speed logical link. */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}

/* Clean up after a failed logical-link setup: disconnect a channel that
 * never reached BT_CONNECTED, or unwind an in-progress move otherwise.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}

/* Logical link came up for a channel being created on an AMP: send the
 * deferred EFS Configure Response and finish setup if the remote's
 * configuration is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}

/* Logical link came up mid-move: advance the channel-move state machine
 * according to our role and current move state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}

/* Call with chan locked */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}

/* Initiate a channel move: toward an AMP if currently on BR/EDR and the
 * policy prefers AMP, or back to BR/EDR (move_id 0) otherwise.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}

/* Physical link result for a channel being created on an AMP: either
 * proceed with the AMP create (or fall back to BR/EDR) for an outgoing
 * channel, or send the deferred Create Channel Response for an incoming
 * one.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}

/* Physical link is up and we are the move initiator: send the actual
 * Move Channel Request.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}

/* Physical link is up and we are the move responder: answer according
 * to whether the logical link is (or will become) available.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not
available */ 4810 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED); 4811 } 4812 } 4813 4814 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result) 4815 { 4816 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { 4817 u8 rsp_result; 4818 if (result == -EINVAL) 4819 rsp_result = L2CAP_MR_BAD_ID; 4820 else 4821 rsp_result = L2CAP_MR_NOT_ALLOWED; 4822 4823 l2cap_send_move_chan_rsp(chan, rsp_result); 4824 } 4825 4826 chan->move_role = L2CAP_MOVE_ROLE_NONE; 4827 chan->move_state = L2CAP_MOVE_STABLE; 4828 4829 /* Restart data transmission */ 4830 l2cap_ertm_send(chan); 4831 } 4832 4833 /* Invoke with locked chan */ 4834 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) 4835 { 4836 u8 local_amp_id = chan->local_amp_id; 4837 u8 remote_amp_id = chan->remote_amp_id; 4838 4839 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", 4840 chan, result, local_amp_id, remote_amp_id); 4841 4842 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) { 4843 l2cap_chan_unlock(chan); 4844 return; 4845 } 4846 4847 if (chan->state != BT_CONNECTED) { 4848 l2cap_do_create(chan, result, local_amp_id, remote_amp_id); 4849 } else if (result != L2CAP_MR_SUCCESS) { 4850 l2cap_do_move_cancel(chan, result); 4851 } else { 4852 switch (chan->move_role) { 4853 case L2CAP_MOVE_ROLE_INITIATOR: 4854 l2cap_do_move_initiate(chan, local_amp_id, 4855 remote_amp_id); 4856 break; 4857 case L2CAP_MOVE_ROLE_RESPONDER: 4858 l2cap_do_move_respond(chan, result); 4859 break; 4860 default: 4861 l2cap_do_move_cancel(chan, result); 4862 break; 4863 } 4864 } 4865 } 4866 4867 static inline int l2cap_move_channel_req(struct l2cap_conn *conn, 4868 struct l2cap_cmd_hdr *cmd, 4869 u16 cmd_len, void *data) 4870 { 4871 struct l2cap_move_chan_req *req = data; 4872 struct l2cap_move_chan_rsp rsp; 4873 struct l2cap_chan *chan; 4874 u16 icid = 0; 4875 u16 result = L2CAP_MR_NOT_ALLOWED; 4876 4877 if (cmd_len != sizeof(*req)) 4878 return -EPROTO; 4879 4880 icid = le16_to_cpu(req->icid); 
4881 4882 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); 4883 4884 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) 4885 return -EINVAL; 4886 4887 chan = l2cap_get_chan_by_dcid(conn, icid); 4888 if (!chan) { 4889 rsp.icid = cpu_to_le16(icid); 4890 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED); 4891 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP, 4892 sizeof(rsp), &rsp); 4893 return 0; 4894 } 4895 4896 chan->ident = cmd->ident; 4897 4898 if (chan->scid < L2CAP_CID_DYN_START || 4899 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY || 4900 (chan->mode != L2CAP_MODE_ERTM && 4901 chan->mode != L2CAP_MODE_STREAMING)) { 4902 result = L2CAP_MR_NOT_ALLOWED; 4903 goto send_move_response; 4904 } 4905 4906 if (chan->local_amp_id == req->dest_amp_id) { 4907 result = L2CAP_MR_SAME_ID; 4908 goto send_move_response; 4909 } 4910 4911 if (req->dest_amp_id != AMP_ID_BREDR) { 4912 struct hci_dev *hdev; 4913 hdev = hci_dev_get(req->dest_amp_id); 4914 if (!hdev || hdev->dev_type != HCI_AMP || 4915 !test_bit(HCI_UP, &hdev->flags)) { 4916 if (hdev) 4917 hci_dev_put(hdev); 4918 4919 result = L2CAP_MR_BAD_ID; 4920 goto send_move_response; 4921 } 4922 hci_dev_put(hdev); 4923 } 4924 4925 /* Detect a move collision. Only send a collision response 4926 * if this side has "lost", otherwise proceed with the move. 4927 * The winner has the larger bd_addr. 
	 */
	/* Collision: we are already moving (or have a move role) and our
	 * bd_addr compares greater than the peer's, so we "win" and reject
	 * the peer's request with L2CAP_MR_COLLISION.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	/* Accept the move: become responder and remember the requested
	 * destination controller id.
	 */
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer success until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		/* Moving to an AMP controller: physical/logical link setup
		 * happens first, so only a pending result can be sent now.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	/* NOTE(review): chan was returned locked by l2cap_get_chan_by_dcid()
	 * earlier in this function — this unlock pairs with that lookup.
	 */
	l2cap_chan_unlock(chan);

	return 0;
}

/* Handle the state transitions for a successful or pending Move Channel
 * Response (rsp result SUCCESS/PEND); failures go to l2cap_move_fail().
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Lookup locks the channel on success; unlocked at the end of this
	 * function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel gone - confirm by icid so the peer can finish */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
4984 */ 4985 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; 4986 break; 4987 case L2CAP_MOVE_WAIT_RSP_SUCCESS: 4988 if (result == L2CAP_MR_PEND) { 4989 break; 4990 } else if (test_bit(CONN_LOCAL_BUSY, 4991 &chan->conn_state)) { 4992 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; 4993 } else { 4994 /* Logical link is up or moving to BR/EDR, 4995 * proceed with move 4996 */ 4997 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; 4998 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); 4999 } 5000 break; 5001 case L2CAP_MOVE_WAIT_RSP: 5002 /* Moving to AMP */ 5003 if (result == L2CAP_MR_SUCCESS) { 5004 /* Remote is ready, send confirm immediately 5005 * after logical link is ready 5006 */ 5007 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; 5008 } else { 5009 /* Both logical link and move success 5010 * are required to confirm 5011 */ 5012 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP; 5013 } 5014 5015 /* Placeholder - get hci_chan for logical link */ 5016 if (!hchan) { 5017 /* Logical link not available */ 5018 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); 5019 break; 5020 } 5021 5022 /* If the logical link is not yet connected, do not 5023 * send confirmation. 5024 */ 5025 if (hchan->state != BT_CONNECTED) 5026 break; 5027 5028 /* Logical link is already ready to go */ 5029 5030 chan->hs_hcon = hchan->conn; 5031 chan->hs_hcon->l2cap_data = chan->conn; 5032 5033 if (result == L2CAP_MR_SUCCESS) { 5034 /* Can confirm now */ 5035 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); 5036 } else { 5037 /* Now only need move success 5038 * to confirm 5039 */ 5040 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; 5041 } 5042 5043 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); 5044 break; 5045 default: 5046 /* Any other amp move state means the move failed. 
		 */
		/* Cancel the move: revert to the current local controller */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}

/* Handle a failed Move Channel Response (anything other than
 * SUCCESS/PEND). A collision makes this side the responder; any other
 * failure cancels the move. An unconfirmed Move Channel Confirm is
 * always sent back per the signaling exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Lookup locks the channel on success (unlocked below) */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision - peer drives the move now */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}

/* Process an incoming Move Channel Response and dispatch to the
 * continue (SUCCESS/PEND) or fail path. Returns -EPROTO on a malformed
 * command so the caller emits a command reject.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);

	return 0;
}

/* Process an incoming Move Channel Confirm: commit or revert the
 * controller switch and always answer with a Confirm Response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid,
result); 5122 5123 chan = l2cap_get_chan_by_dcid(conn, icid); 5124 if (!chan) { 5125 /* Spec requires a response even if the icid was not found */ 5126 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 5127 return 0; 5128 } 5129 5130 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) { 5131 if (result == L2CAP_MC_CONFIRMED) { 5132 chan->local_amp_id = chan->move_id; 5133 if (chan->local_amp_id == AMP_ID_BREDR) 5134 __release_logical_link(chan); 5135 } else { 5136 chan->move_id = chan->local_amp_id; 5137 } 5138 5139 l2cap_move_done(chan); 5140 } 5141 5142 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 5143 5144 l2cap_chan_unlock(chan); 5145 5146 return 0; 5147 } 5148 5149 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, 5150 struct l2cap_cmd_hdr *cmd, 5151 u16 cmd_len, void *data) 5152 { 5153 struct l2cap_move_chan_cfm_rsp *rsp = data; 5154 struct l2cap_chan *chan; 5155 u16 icid; 5156 5157 if (cmd_len != sizeof(*rsp)) 5158 return -EPROTO; 5159 5160 icid = le16_to_cpu(rsp->icid); 5161 5162 BT_DBG("icid 0x%4.4x", icid); 5163 5164 chan = l2cap_get_chan_by_scid(conn, icid); 5165 if (!chan) 5166 return 0; 5167 5168 __clear_chan_timer(chan); 5169 5170 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) { 5171 chan->local_amp_id = chan->move_id; 5172 5173 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan) 5174 __release_logical_link(chan); 5175 5176 l2cap_move_done(chan); 5177 } 5178 5179 l2cap_chan_unlock(chan); 5180 5181 return 0; 5182 } 5183 5184 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, 5185 struct l2cap_cmd_hdr *cmd, 5186 u16 cmd_len, u8 *data) 5187 { 5188 struct hci_conn *hcon = conn->hcon; 5189 struct l2cap_conn_param_update_req *req; 5190 struct l2cap_conn_param_update_rsp rsp; 5191 u16 min, max, latency, to_multiplier; 5192 int err; 5193 5194 if (hcon->role != HCI_ROLE_MASTER) 5195 return -EINVAL; 5196 5197 if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) 5198 return -EPROTO; 5199 
5200 req = (struct l2cap_conn_param_update_req *) data; 5201 min = __le16_to_cpu(req->min); 5202 max = __le16_to_cpu(req->max); 5203 latency = __le16_to_cpu(req->latency); 5204 to_multiplier = __le16_to_cpu(req->to_multiplier); 5205 5206 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", 5207 min, max, latency, to_multiplier); 5208 5209 memset(&rsp, 0, sizeof(rsp)); 5210 5211 err = hci_check_conn_params(min, max, latency, to_multiplier); 5212 if (err) 5213 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 5214 else 5215 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 5216 5217 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 5218 sizeof(rsp), &rsp); 5219 5220 if (!err) { 5221 u8 store_hint; 5222 5223 store_hint = hci_le_conn_update(hcon, min, max, latency, 5224 to_multiplier); 5225 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type, 5226 store_hint, min, max, latency, 5227 to_multiplier); 5228 5229 } 5230 5231 return 0; 5232 } 5233 5234 static int l2cap_le_connect_rsp(struct l2cap_conn *conn, 5235 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5236 u8 *data) 5237 { 5238 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; 5239 struct hci_conn *hcon = conn->hcon; 5240 u16 dcid, mtu, mps, credits, result; 5241 struct l2cap_chan *chan; 5242 int err, sec_level; 5243 5244 if (cmd_len < sizeof(*rsp)) 5245 return -EPROTO; 5246 5247 dcid = __le16_to_cpu(rsp->dcid); 5248 mtu = __le16_to_cpu(rsp->mtu); 5249 mps = __le16_to_cpu(rsp->mps); 5250 credits = __le16_to_cpu(rsp->credits); 5251 result = __le16_to_cpu(rsp->result); 5252 5253 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23)) 5254 return -EPROTO; 5255 5256 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", 5257 dcid, mtu, mps, credits, result); 5258 5259 mutex_lock(&conn->chan_lock); 5260 5261 chan = __l2cap_get_chan_by_ident(conn, cmd->ident); 5262 if (!chan) { 5263 err = -EBADSLT; 5264 goto unlock; 5265 } 5266 5267 err = 0; 5268 5269 
	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: adopt the peer's channel parameters
		 * and mark the channel ready.
		 */
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate security one level and retry via SMP */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

/* Dispatch one BR/EDR signaling command to its handler. Only handlers
 * that validate peer input propagate an error (which triggers a command
 * reject in the caller); response handlers have their return ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len,
data); 5352 break; 5353 5354 case L2CAP_ECHO_RSP: 5355 break; 5356 5357 case L2CAP_INFO_REQ: 5358 err = l2cap_information_req(conn, cmd, cmd_len, data); 5359 break; 5360 5361 case L2CAP_INFO_RSP: 5362 l2cap_information_rsp(conn, cmd, cmd_len, data); 5363 break; 5364 5365 case L2CAP_CREATE_CHAN_REQ: 5366 err = l2cap_create_channel_req(conn, cmd, cmd_len, data); 5367 break; 5368 5369 case L2CAP_MOVE_CHAN_REQ: 5370 err = l2cap_move_channel_req(conn, cmd, cmd_len, data); 5371 break; 5372 5373 case L2CAP_MOVE_CHAN_RSP: 5374 l2cap_move_channel_rsp(conn, cmd, cmd_len, data); 5375 break; 5376 5377 case L2CAP_MOVE_CHAN_CFM: 5378 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); 5379 break; 5380 5381 case L2CAP_MOVE_CHAN_CFM_RSP: 5382 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); 5383 break; 5384 5385 default: 5386 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); 5387 err = -EINVAL; 5388 break; 5389 } 5390 5391 return err; 5392 } 5393 5394 static int l2cap_le_connect_req(struct l2cap_conn *conn, 5395 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5396 u8 *data) 5397 { 5398 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; 5399 struct l2cap_le_conn_rsp rsp; 5400 struct l2cap_chan *chan, *pchan; 5401 u16 dcid, scid, credits, mtu, mps; 5402 __le16 psm; 5403 u8 result; 5404 5405 if (cmd_len != sizeof(*req)) 5406 return -EPROTO; 5407 5408 scid = __le16_to_cpu(req->scid); 5409 mtu = __le16_to_cpu(req->mtu); 5410 mps = __le16_to_cpu(req->mps); 5411 psm = req->psm; 5412 dcid = 0; 5413 credits = 0; 5414 5415 if (mtu < 23 || mps < 23) 5416 return -EPROTO; 5417 5418 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), 5419 scid, mtu, mps); 5420 5421 /* Check if we have socket listening on psm */ 5422 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 5423 &conn->hcon->dst, LE_LINK); 5424 if (!pchan) { 5425 result = L2CAP_CR_BAD_PSM; 5426 chan = NULL; 5427 goto response; 5428 } 5429 5430 
mutex_lock(&conn->chan_lock); 5431 l2cap_chan_lock(pchan); 5432 5433 if (!smp_sufficient_security(conn->hcon, pchan->sec_level, 5434 SMP_ALLOW_STK)) { 5435 result = L2CAP_CR_AUTHENTICATION; 5436 chan = NULL; 5437 goto response_unlock; 5438 } 5439 5440 /* Check if we already have channel with that dcid */ 5441 if (__l2cap_get_chan_by_dcid(conn, scid)) { 5442 result = L2CAP_CR_NO_MEM; 5443 chan = NULL; 5444 goto response_unlock; 5445 } 5446 5447 chan = pchan->ops->new_connection(pchan); 5448 if (!chan) { 5449 result = L2CAP_CR_NO_MEM; 5450 goto response_unlock; 5451 } 5452 5453 l2cap_le_flowctl_init(chan); 5454 5455 bacpy(&chan->src, &conn->hcon->src); 5456 bacpy(&chan->dst, &conn->hcon->dst); 5457 chan->src_type = bdaddr_src_type(conn->hcon); 5458 chan->dst_type = bdaddr_dst_type(conn->hcon); 5459 chan->psm = psm; 5460 chan->dcid = scid; 5461 chan->omtu = mtu; 5462 chan->remote_mps = mps; 5463 chan->tx_credits = __le16_to_cpu(req->credits); 5464 5465 __l2cap_chan_add(conn, chan); 5466 dcid = chan->scid; 5467 credits = chan->rx_credits; 5468 5469 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); 5470 5471 chan->ident = cmd->ident; 5472 5473 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 5474 l2cap_state_change(chan, BT_CONNECT2); 5475 /* The following result value is actually not defined 5476 * for LE CoC but we use it to let the function know 5477 * that it should bail out after doing its cleanup 5478 * instead of sending a response. 
5479 */ 5480 result = L2CAP_CR_PEND; 5481 chan->ops->defer(chan); 5482 } else { 5483 l2cap_chan_ready(chan); 5484 result = L2CAP_CR_SUCCESS; 5485 } 5486 5487 response_unlock: 5488 l2cap_chan_unlock(pchan); 5489 mutex_unlock(&conn->chan_lock); 5490 l2cap_chan_put(pchan); 5491 5492 if (result == L2CAP_CR_PEND) 5493 return 0; 5494 5495 response: 5496 if (chan) { 5497 rsp.mtu = cpu_to_le16(chan->imtu); 5498 rsp.mps = cpu_to_le16(chan->mps); 5499 } else { 5500 rsp.mtu = 0; 5501 rsp.mps = 0; 5502 } 5503 5504 rsp.dcid = cpu_to_le16(dcid); 5505 rsp.credits = cpu_to_le16(credits); 5506 rsp.result = cpu_to_le16(result); 5507 5508 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); 5509 5510 return 0; 5511 } 5512 5513 static inline int l2cap_le_credits(struct l2cap_conn *conn, 5514 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 5515 u8 *data) 5516 { 5517 struct l2cap_le_credits *pkt; 5518 struct l2cap_chan *chan; 5519 u16 cid, credits, max_credits; 5520 5521 if (cmd_len != sizeof(*pkt)) 5522 return -EPROTO; 5523 5524 pkt = (struct l2cap_le_credits *) data; 5525 cid = __le16_to_cpu(pkt->cid); 5526 credits = __le16_to_cpu(pkt->credits); 5527 5528 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); 5529 5530 chan = l2cap_get_chan_by_dcid(conn, cid); 5531 if (!chan) 5532 return -EBADSLT; 5533 5534 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; 5535 if (credits > max_credits) { 5536 BT_ERR("LE credits overflow"); 5537 l2cap_send_disconn_req(chan, ECONNRESET); 5538 l2cap_chan_unlock(chan); 5539 5540 /* Return 0 so that we don't trigger an unnecessary 5541 * command reject packet. 
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Credits arrived: flush as much of the queued TX data as the new
	 * credit count allows.
	 */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over - let the channel owner resume sending */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}

/* Handle an LE Command Reject: the peer refused our pending request
 * (matched by ident), so tear the corresponding channel down.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}

/* Dispatch one LE signaling command to its handler. A non-zero return
 * makes the caller send a command reject for unknown/invalid commands.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do with the peer's update response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x",
cmd->code); 5626 err = -EINVAL; 5627 break; 5628 } 5629 5630 return err; 5631 } 5632 5633 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, 5634 struct sk_buff *skb) 5635 { 5636 struct hci_conn *hcon = conn->hcon; 5637 struct l2cap_cmd_hdr *cmd; 5638 u16 len; 5639 int err; 5640 5641 if (hcon->type != LE_LINK) 5642 goto drop; 5643 5644 if (skb->len < L2CAP_CMD_HDR_SIZE) 5645 goto drop; 5646 5647 cmd = (void *) skb->data; 5648 skb_pull(skb, L2CAP_CMD_HDR_SIZE); 5649 5650 len = le16_to_cpu(cmd->len); 5651 5652 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); 5653 5654 if (len != skb->len || !cmd->ident) { 5655 BT_DBG("corrupted command"); 5656 goto drop; 5657 } 5658 5659 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data); 5660 if (err) { 5661 struct l2cap_cmd_rej_unk rej; 5662 5663 BT_ERR("Wrong link type (%d)", err); 5664 5665 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 5666 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 5667 sizeof(rej), &rej); 5668 } 5669 5670 drop: 5671 kfree_skb(skb); 5672 } 5673 5674 static inline void l2cap_sig_channel(struct l2cap_conn *conn, 5675 struct sk_buff *skb) 5676 { 5677 struct hci_conn *hcon = conn->hcon; 5678 u8 *data = skb->data; 5679 int len = skb->len; 5680 struct l2cap_cmd_hdr cmd; 5681 int err; 5682 5683 l2cap_raw_recv(conn, skb); 5684 5685 if (hcon->type != ACL_LINK) 5686 goto drop; 5687 5688 while (len >= L2CAP_CMD_HDR_SIZE) { 5689 u16 cmd_len; 5690 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); 5691 data += L2CAP_CMD_HDR_SIZE; 5692 len -= L2CAP_CMD_HDR_SIZE; 5693 5694 cmd_len = le16_to_cpu(cmd.len); 5695 5696 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, 5697 cmd.ident); 5698 5699 if (cmd_len > len || !cmd.ident) { 5700 BT_DBG("corrupted command"); 5701 break; 5702 } 5703 5704 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data); 5705 if (err) { 5706 struct l2cap_cmd_rej_unk rej; 5707 5708 BT_ERR("Wrong link type (%d)", err); 5709 5710 rej.reason = 
cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 5711 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, 5712 sizeof(rej), &rej); 5713 } 5714 5715 data += cmd_len; 5716 len -= cmd_len; 5717 } 5718 5719 drop: 5720 kfree_skb(skb); 5721 } 5722 5723 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) 5724 { 5725 u16 our_fcs, rcv_fcs; 5726 int hdr_size; 5727 5728 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 5729 hdr_size = L2CAP_EXT_HDR_SIZE; 5730 else 5731 hdr_size = L2CAP_ENH_HDR_SIZE; 5732 5733 if (chan->fcs == L2CAP_FCS_CRC16) { 5734 skb_trim(skb, skb->len - L2CAP_FCS_SIZE); 5735 rcv_fcs = get_unaligned_le16(skb->data + skb->len); 5736 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 5737 5738 if (our_fcs != rcv_fcs) 5739 return -EBADMSG; 5740 } 5741 return 0; 5742 } 5743 5744 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 5745 { 5746 struct l2cap_ctrl control; 5747 5748 BT_DBG("chan %p", chan); 5749 5750 memset(&control, 0, sizeof(control)); 5751 control.sframe = 1; 5752 control.final = 1; 5753 control.reqseq = chan->buffer_seq; 5754 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5755 5756 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 5757 control.super = L2CAP_SUPER_RNR; 5758 l2cap_send_sframe(chan, &control); 5759 } 5760 5761 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 5762 chan->unacked_frames > 0) 5763 __set_retrans_timer(chan); 5764 5765 /* Send pending iframes */ 5766 l2cap_ertm_send(chan); 5767 5768 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 5769 test_bit(CONN_SEND_FBIT, &chan->conn_state)) { 5770 /* F-bit wasn't sent in an s-frame or i-frame yet, so 5771 * send it now. 
5772 */ 5773 control.super = L2CAP_SUPER_RR; 5774 l2cap_send_sframe(chan, &control); 5775 } 5776 } 5777 5778 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, 5779 struct sk_buff **last_frag) 5780 { 5781 /* skb->len reflects data in skb as well as all fragments 5782 * skb->data_len reflects only data in fragments 5783 */ 5784 if (!skb_has_frag_list(skb)) 5785 skb_shinfo(skb)->frag_list = new_frag; 5786 5787 new_frag->next = NULL; 5788 5789 (*last_frag)->next = new_frag; 5790 *last_frag = new_frag; 5791 5792 skb->len += new_frag->len; 5793 skb->data_len += new_frag->len; 5794 skb->truesize += new_frag->truesize; 5795 } 5796 5797 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, 5798 struct l2cap_ctrl *control) 5799 { 5800 int err = -EINVAL; 5801 5802 switch (control->sar) { 5803 case L2CAP_SAR_UNSEGMENTED: 5804 if (chan->sdu) 5805 break; 5806 5807 err = chan->ops->recv(chan, skb); 5808 break; 5809 5810 case L2CAP_SAR_START: 5811 if (chan->sdu) 5812 break; 5813 5814 chan->sdu_len = get_unaligned_le16(skb->data); 5815 skb_pull(skb, L2CAP_SDULEN_SIZE); 5816 5817 if (chan->sdu_len > chan->imtu) { 5818 err = -EMSGSIZE; 5819 break; 5820 } 5821 5822 if (skb->len >= chan->sdu_len) 5823 break; 5824 5825 chan->sdu = skb; 5826 chan->sdu_last_frag = skb; 5827 5828 skb = NULL; 5829 err = 0; 5830 break; 5831 5832 case L2CAP_SAR_CONTINUE: 5833 if (!chan->sdu) 5834 break; 5835 5836 append_skb_frag(chan->sdu, skb, 5837 &chan->sdu_last_frag); 5838 skb = NULL; 5839 5840 if (chan->sdu->len >= chan->sdu_len) 5841 break; 5842 5843 err = 0; 5844 break; 5845 5846 case L2CAP_SAR_END: 5847 if (!chan->sdu) 5848 break; 5849 5850 append_skb_frag(chan->sdu, skb, 5851 &chan->sdu_last_frag); 5852 skb = NULL; 5853 5854 if (chan->sdu->len != chan->sdu_len) 5855 break; 5856 5857 err = chan->ops->recv(chan, chan->sdu); 5858 5859 if (!err) { 5860 /* Reassembly complete */ 5861 chan->sdu = NULL; 5862 chan->sdu_last_frag = NULL; 5863 chan->sdu_len = 0; 
5864 } 5865 break; 5866 } 5867 5868 if (err) { 5869 kfree_skb(skb); 5870 kfree_skb(chan->sdu); 5871 chan->sdu = NULL; 5872 chan->sdu_last_frag = NULL; 5873 chan->sdu_len = 0; 5874 } 5875 5876 return err; 5877 } 5878 5879 static int l2cap_resegment(struct l2cap_chan *chan) 5880 { 5881 /* Placeholder */ 5882 return 0; 5883 } 5884 5885 void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 5886 { 5887 u8 event; 5888 5889 if (chan->mode != L2CAP_MODE_ERTM) 5890 return; 5891 5892 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; 5893 l2cap_tx(chan, NULL, NULL, event); 5894 } 5895 5896 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) 5897 { 5898 int err = 0; 5899 /* Pass sequential frames to l2cap_reassemble_sdu() 5900 * until a gap is encountered. 5901 */ 5902 5903 BT_DBG("chan %p", chan); 5904 5905 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 5906 struct sk_buff *skb; 5907 BT_DBG("Searching for skb with txseq %d (queue len %d)", 5908 chan->buffer_seq, skb_queue_len(&chan->srej_q)); 5909 5910 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); 5911 5912 if (!skb) 5913 break; 5914 5915 skb_unlink(skb, &chan->srej_q); 5916 chan->buffer_seq = __next_seq(chan, chan->buffer_seq); 5917 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap); 5918 if (err) 5919 break; 5920 } 5921 5922 if (skb_queue_empty(&chan->srej_q)) { 5923 chan->rx_state = L2CAP_RX_STATE_RECV; 5924 l2cap_send_ack(chan); 5925 } 5926 5927 return err; 5928 } 5929 5930 static void l2cap_handle_srej(struct l2cap_chan *chan, 5931 struct l2cap_ctrl *control) 5932 { 5933 struct sk_buff *skb; 5934 5935 BT_DBG("chan %p, control %p", chan, control); 5936 5937 if (control->reqseq == chan->next_tx_seq) { 5938 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); 5939 l2cap_send_disconn_req(chan, ECONNRESET); 5940 return; 5941 } 5942 5943 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); 5944 5945 if (skb == NULL) { 5946 BT_DBG("Seq %d not 
available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only retransmit if this F=1 SREJ was not the
			 * acknowledgment of an SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}

/* Handle a received REJ S-frame: the peer requests retransmission of
 * every unacked I-frame starting at control->reqseq.  A reqseq equal to
 * next_tx_seq (nothing outstanding) or a frame already past its retry
 * limit tears the channel down instead.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 REJ answers our poll; retransmit only if this REJ
		 * was not already acted upon (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}

/* Classify an incoming I-frame sequence number relative to the RX
 * window and any outstanding SREJ requests.  Returns one of the
 * L2CAP_TXSEQ_* classifications consumed by the RX state handlers.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}

/* ERTM RX state machine, RECV state.  Consumes @skb: it is either
 * queued (skb_in_use set) or freed before returning.  An invalid
 * sequence number disconnects the channel.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

/* ERTM RX state machine, SREJ_SENT state: frames arriving while one or
 * more SREJ requests are outstanding are parked on srej_q until the
 * missing frames arrive.  Same skb ownership rules as
 * l2cap_rx_state_recv().
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

/* Put the channel back into RECV state after a move and pick up the MTU
 * of the controller now carrying the link (hs_hcon set means an AMP
 * controller is in use -- NOTE(review): inferred from the a2mp/amp
 * includes, confirm against amp.c).
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu =
chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}

/* RX state machine, WAIT_P state: only a poll (P=1) S-frame is valid
 * here.  Rewind the TX side to the peer's reqseq, finish the move, and
 * answer with an F=1 frame before re-dispatching any non-I-frame event.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not acceptable until the F=1 exchange completes */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}

/* RX state machine, WAIT_F state: only a final (F=1) frame is valid.
 * Rewind the TX side, re-adopt the (possibly new) controller MTU,
 * resegment, then process the frame as a normal RECV-state event.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}

static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}

/* Top-level ERTM RX dispatch: route the event to the handler for the
 * channel's current rx_state.  A reqseq that does not acknowledge a
 * sent-but-unacked frame is a protocol violation and disconnects.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}

/* Streaming-mode receive: in-sequence frames are reassembled, anything
 * else flushes the partial SDU and drops the frame (no retransmission
 * in streaming mode).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan,
	       control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: discard any partial SDU and the
		 * frame itself; streaming mode never requests resend.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}

/* Validate and dispatch one ERTM/streaming PDU for a connected channel:
 * unpack the control field, verify the FCS, strip SDU-length/FCS bytes
 * from the payload length, enforce the MPS, then route I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to the matching RX event.
 * Always consumes @skb (directly or via the RX state machine).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the 2-bit S-frame "super" field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct
l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}

/* Receive one LE credit-based flow control PDU.  Consumes one rx
 * credit, tops up the sender's credits when needed, and reassembles
 * SDU fragments (first fragment carries a 16-bit SDU length header).
 * Always takes care of freeing @skb internally on error -- see the
 * comment before the final return.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally.  An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}

/* Deliver a data PDU for connection-oriented CID @cid, demuxing by the
 * channel's operating mode.  @skb is always consumed.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice.  L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}

/* Deliver a connectionless (G-frame) PDU to the global channel
 * listening on @psm.  The reference taken by
 * l2cap_global_chan_by_psm() is dropped on every path; @skb is freed
 * unless ops->recv() accepted it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}

/* Entry point for one complete L2CAP frame: queue it until the HCI
 * link is fully up, validate the basic-header length, then demux on
 * the CID.  @skb is always consumed (here or by the called handler).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* Work item draining frames that arrived before the HCI link reached
 * BT_CONNECTED (queued by l2cap_recv_frame() above).
 */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}

/* Create (or return the existing) l2cap_conn for @hcon, allocating the
 * HCI channel first so failure leaves no half-built state.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}
	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}

/* A PSM is valid when non-zero and, for LE, <= 0x00ff; for BR/EDR the
 * low bit of the low byte must be 1 and the low bit of the high byte 0.
 */
static bool is_valid_psm(u16 psm, u8 dst_type) {
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}

/* Initiate an outgoing L2CAP channel to @dst on PSM @psm and/or fixed
 * CID @cid.  Validates PSM/CID against the channel type, checks the
 * channel mode and state, then creates the underlying ACL or LE HCI
 * connection.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le_scan(hdev, dst, dst_type,
					   chan->sec_level,
					   HCI_LE_CONN_TIMEOUT,
					   role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}
7124 7125 if (IS_ERR(hcon)) { 7126 err = PTR_ERR(hcon); 7127 goto done; 7128 } 7129 7130 conn = l2cap_conn_add(hcon); 7131 if (!conn) { 7132 hci_conn_drop(hcon); 7133 err = -ENOMEM; 7134 goto done; 7135 } 7136 7137 mutex_lock(&conn->chan_lock); 7138 l2cap_chan_lock(chan); 7139 7140 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { 7141 hci_conn_drop(hcon); 7142 err = -EBUSY; 7143 goto chan_unlock; 7144 } 7145 7146 /* Update source addr of the socket */ 7147 bacpy(&chan->src, &hcon->src); 7148 chan->src_type = bdaddr_src_type(hcon); 7149 7150 __l2cap_chan_add(conn, chan); 7151 7152 /* l2cap_chan_add takes its own ref so we can drop this one */ 7153 hci_conn_drop(hcon); 7154 7155 l2cap_state_change(chan, BT_CONNECT); 7156 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); 7157 7158 /* Release chan->sport so that it can be reused by other 7159 * sockets (as it's only used for listening sockets). 7160 */ 7161 write_lock(&chan_list_lock); 7162 chan->sport = 0; 7163 write_unlock(&chan_list_lock); 7164 7165 if (hcon->state == BT_CONNECTED) { 7166 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 7167 __clear_chan_timer(chan); 7168 if (l2cap_chan_check_security(chan, true)) 7169 l2cap_state_change(chan, BT_CONNECTED); 7170 } else 7171 l2cap_do_start(chan); 7172 } 7173 7174 err = 0; 7175 7176 chan_unlock: 7177 l2cap_chan_unlock(chan); 7178 mutex_unlock(&conn->chan_lock); 7179 done: 7180 hci_dev_unlock(hdev); 7181 hci_dev_put(hdev); 7182 return err; 7183 } 7184 EXPORT_SYMBOL_GPL(l2cap_chan_connect); 7185 7186 /* ---- L2CAP interface with lower layer (HCI) ---- */ 7187 7188 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 7189 { 7190 int exact = 0, lm1 = 0, lm2 = 0; 7191 struct l2cap_chan *c; 7192 7193 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); 7194 7195 /* Find listening sockets and check their link_mode */ 7196 read_lock(&chan_list_lock); 7197 list_for_each_entry(c, &chan_list, global_l) { 7198 if (c->state != BT_LISTEN) 7199 continue; 7200 7201 
		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the channel with a reference held (via l2cap_chan_hold());
 * the caller must drop it with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}

/* HCI connect-complete callback: tear the connection down on error,
 * otherwise create the l2cap_conn, attach any listening fixed
 * channels, and kick off channel setup.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection.  We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}

/* Disconnect-indication hook: report the reason this end wants to give
 * the peer for the disconnection.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

/* HCI disconnect-complete callback: destroy the L2CAP connection */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timer for MEDIUM security and closes
 * HIGH/FIPS channels outright; regaining it clears the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level ==
BT_SECURITY_FIPS) 7345 l2cap_chan_close(chan, ECONNREFUSED); 7346 } else { 7347 if (chan->sec_level == BT_SECURITY_MEDIUM) 7348 __clear_chan_timer(chan); 7349 } 7350 } 7351 7352 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) 7353 { 7354 struct l2cap_conn *conn = hcon->l2cap_data; 7355 struct l2cap_chan *chan; 7356 7357 if (!conn) 7358 return; 7359 7360 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); 7361 7362 mutex_lock(&conn->chan_lock); 7363 7364 list_for_each_entry(chan, &conn->chan_l, list) { 7365 l2cap_chan_lock(chan); 7366 7367 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, 7368 state_to_string(chan->state)); 7369 7370 if (chan->scid == L2CAP_CID_A2MP) { 7371 l2cap_chan_unlock(chan); 7372 continue; 7373 } 7374 7375 if (!status && encrypt) 7376 chan->sec_level = hcon->sec_level; 7377 7378 if (!__l2cap_no_conn_pending(chan)) { 7379 l2cap_chan_unlock(chan); 7380 continue; 7381 } 7382 7383 if (!status && (chan->state == BT_CONNECTED || 7384 chan->state == BT_CONFIG)) { 7385 chan->ops->resume(chan); 7386 l2cap_check_encryption(chan, encrypt); 7387 l2cap_chan_unlock(chan); 7388 continue; 7389 } 7390 7391 if (chan->state == BT_CONNECT) { 7392 if (!status) 7393 l2cap_start_connection(chan); 7394 else 7395 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 7396 } else if (chan->state == BT_CONNECT2 && 7397 chan->mode != L2CAP_MODE_LE_FLOWCTL) { 7398 struct l2cap_conn_rsp rsp; 7399 __u16 res, stat; 7400 7401 if (!status) { 7402 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 7403 res = L2CAP_CR_PEND; 7404 stat = L2CAP_CS_AUTHOR_PEND; 7405 chan->ops->defer(chan); 7406 } else { 7407 l2cap_state_change(chan, BT_CONFIG); 7408 res = L2CAP_CR_SUCCESS; 7409 stat = L2CAP_CS_NO_INFO; 7410 } 7411 } else { 7412 l2cap_state_change(chan, BT_DISCONN); 7413 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 7414 res = L2CAP_CR_SEC_BLOCK; 7415 stat = L2CAP_CS_NO_INFO; 7416 } 7417 7418 rsp.scid = cpu_to_le16(chan->dcid); 7419 rsp.dcid = 
cpu_to_le16(chan->scid); 7420 rsp.result = cpu_to_le16(res); 7421 rsp.status = cpu_to_le16(stat); 7422 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 7423 sizeof(rsp), &rsp); 7424 7425 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && 7426 res == L2CAP_CR_SUCCESS) { 7427 char buf[128]; 7428 set_bit(CONF_REQ_SENT, &chan->conf_state); 7429 l2cap_send_cmd(conn, l2cap_get_ident(conn), 7430 L2CAP_CONF_REQ, 7431 l2cap_build_conf_req(chan, buf), 7432 buf); 7433 chan->num_conf_req++; 7434 } 7435 } 7436 7437 l2cap_chan_unlock(chan); 7438 } 7439 7440 mutex_unlock(&conn->chan_lock); 7441 } 7442 7443 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) 7444 { 7445 struct l2cap_conn *conn = hcon->l2cap_data; 7446 struct l2cap_hdr *hdr; 7447 int len; 7448 7449 /* For AMP controller do not create l2cap conn */ 7450 if (!conn && hcon->hdev->dev_type != HCI_BREDR) 7451 goto drop; 7452 7453 if (!conn) 7454 conn = l2cap_conn_add(hcon); 7455 7456 if (!conn) 7457 goto drop; 7458 7459 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 7460 7461 switch (flags) { 7462 case ACL_START: 7463 case ACL_START_NO_FLUSH: 7464 case ACL_COMPLETE: 7465 if (conn->rx_len) { 7466 BT_ERR("Unexpected start frame (len %d)", skb->len); 7467 kfree_skb(conn->rx_skb); 7468 conn->rx_skb = NULL; 7469 conn->rx_len = 0; 7470 l2cap_conn_unreliable(conn, ECOMM); 7471 } 7472 7473 /* Start fragment always begin with Basic L2CAP header */ 7474 if (skb->len < L2CAP_HDR_SIZE) { 7475 BT_ERR("Frame is too short (len %d)", skb->len); 7476 l2cap_conn_unreliable(conn, ECOMM); 7477 goto drop; 7478 } 7479 7480 hdr = (struct l2cap_hdr *) skb->data; 7481 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; 7482 7483 if (len == skb->len) { 7484 /* Complete frame received */ 7485 l2cap_recv_frame(conn, skb); 7486 return; 7487 } 7488 7489 BT_DBG("Start: total len %d, frag len %d", len, skb->len); 7490 7491 if (skb->len > len) { 7492 BT_ERR("Frame is too long (len %d, expected len %d)", 7493 
skb->len, len); 7494 l2cap_conn_unreliable(conn, ECOMM); 7495 goto drop; 7496 } 7497 7498 /* Allocate skb for the complete frame (with header) */ 7499 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); 7500 if (!conn->rx_skb) 7501 goto drop; 7502 7503 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 7504 skb->len); 7505 conn->rx_len = len - skb->len; 7506 break; 7507 7508 case ACL_CONT: 7509 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); 7510 7511 if (!conn->rx_len) { 7512 BT_ERR("Unexpected continuation frame (len %d)", skb->len); 7513 l2cap_conn_unreliable(conn, ECOMM); 7514 goto drop; 7515 } 7516 7517 if (skb->len > conn->rx_len) { 7518 BT_ERR("Fragment is too long (len %d, expected %d)", 7519 skb->len, conn->rx_len); 7520 kfree_skb(conn->rx_skb); 7521 conn->rx_skb = NULL; 7522 conn->rx_len = 0; 7523 l2cap_conn_unreliable(conn, ECOMM); 7524 goto drop; 7525 } 7526 7527 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 7528 skb->len); 7529 conn->rx_len -= skb->len; 7530 7531 if (!conn->rx_len) { 7532 /* Complete frame received. l2cap_recv_frame 7533 * takes ownership of the skb so set the global 7534 * rx_skb pointer to NULL first. 
7535 */ 7536 struct sk_buff *rx_skb = conn->rx_skb; 7537 conn->rx_skb = NULL; 7538 l2cap_recv_frame(conn, rx_skb); 7539 } 7540 break; 7541 } 7542 7543 drop: 7544 kfree_skb(skb); 7545 } 7546 7547 static struct hci_cb l2cap_cb = { 7548 .name = "L2CAP", 7549 .connect_cfm = l2cap_connect_cfm, 7550 .disconn_cfm = l2cap_disconn_cfm, 7551 .security_cfm = l2cap_security_cfm, 7552 }; 7553 7554 static int l2cap_debugfs_show(struct seq_file *f, void *p) 7555 { 7556 struct l2cap_chan *c; 7557 7558 read_lock(&chan_list_lock); 7559 7560 list_for_each_entry(c, &chan_list, global_l) { 7561 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 7562 &c->src, c->src_type, &c->dst, c->dst_type, 7563 c->state, __le16_to_cpu(c->psm), 7564 c->scid, c->dcid, c->imtu, c->omtu, 7565 c->sec_level, c->mode); 7566 } 7567 7568 read_unlock(&chan_list_lock); 7569 7570 return 0; 7571 } 7572 7573 static int l2cap_debugfs_open(struct inode *inode, struct file *file) 7574 { 7575 return single_open(file, l2cap_debugfs_show, inode->i_private); 7576 } 7577 7578 static const struct file_operations l2cap_debugfs_fops = { 7579 .open = l2cap_debugfs_open, 7580 .read = seq_read, 7581 .llseek = seq_lseek, 7582 .release = single_release, 7583 }; 7584 7585 static struct dentry *l2cap_debugfs; 7586 7587 int __init l2cap_init(void) 7588 { 7589 int err; 7590 7591 err = l2cap_init_sockets(); 7592 if (err < 0) 7593 return err; 7594 7595 hci_register_cb(&l2cap_cb); 7596 7597 if (IS_ERR_OR_NULL(bt_debugfs)) 7598 return 0; 7599 7600 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, 7601 NULL, &l2cap_debugfs_fops); 7602 7603 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs, 7604 &le_max_credits); 7605 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs, 7606 &le_default_mps); 7607 7608 return 0; 7609 } 7610 7611 void l2cap_exit(void) 7612 { 7613 debugfs_remove(l2cap_debugfs); 7614 hci_unregister_cb(&l2cap_cb); 7615 l2cap_cleanup_sockets(); 7616 } 7617 7618 
/* Expose the disable_ertm flag (declared near the top of this file) as a
 * writable module parameter (mode 0644: root may toggle it at runtime via
 * /sys/module/.../parameters).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");