1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 Copyright 2023 NXP
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "smp.h"
40 #include "msft.h"
41 #include "eir.h"
42
/* 16 bytes (128 bits) of zeros; comparison constant for link keys
 * (NOTE(review): the users of ZERO_KEY are outside this chunk).
 */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert a duration in whole seconds to jiffies for kernel timers */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
47
48 /* Handle HCI Event packets */
49
/* Pull @len bytes of event parameters off @skb.
 *
 * Returns a pointer to the pulled data, or NULL after logging the
 * event code @ev when the skb is shorter than @len.
 */
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *param = skb_pull_data(skb, len);

	if (!param)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return param;
}
61
/* Pull @len bytes of Command Complete parameters off @skb.
 *
 * Returns a pointer to the pulled data, or NULL after logging the
 * opcode @op when the skb is shorter than @len.
 */
static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *param = skb_pull_data(skb, len);

	if (!param)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return param;
}
73
/* Pull @len bytes of LE meta event parameters off @skb.
 *
 * Returns a pointer to the pulled data, or NULL after logging the LE
 * subevent code @ev when the skb is shorter than @len.
 */
static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *param = skb_pull_data(skb, len);

	if (!param)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return param;
}
85
/* Command Complete handler for HCI Inquiry Cancel.
 *
 * Clears HCI_INQUIRY, wakes any waiters on that bit and, unless an LE
 * active scan is still running, moves discovery to STOPPED.
 * Returns the (possibly rewritten) command status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}
125
hci_cc_periodic_inq(struct hci_dev * hdev,void * data,struct sk_buff * skb)126 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 struct sk_buff *skb)
128 {
129 struct hci_ev_status *rp = data;
130
131 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
132
133 if (rp->status)
134 return rp->status;
135
136 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
137
138 return rp->status;
139 }
140
hci_cc_exit_periodic_inq(struct hci_dev * hdev,void * data,struct sk_buff * skb)141 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 struct sk_buff *skb)
143 {
144 struct hci_ev_status *rp = data;
145
146 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
147
148 if (rp->status)
149 return rp->status;
150
151 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
152
153 hci_conn_check_pending(hdev);
154
155 return rp->status;
156 }
157
hci_cc_remote_name_req_cancel(struct hci_dev * hdev,void * data,struct sk_buff * skb)158 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 struct sk_buff *skb)
160 {
161 struct hci_ev_status *rp = data;
162
163 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
164
165 return rp->status;
166 }
167
hci_cc_role_discovery(struct hci_dev * hdev,void * data,struct sk_buff * skb)168 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 struct sk_buff *skb)
170 {
171 struct hci_rp_role_discovery *rp = data;
172 struct hci_conn *conn;
173
174 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
175
176 if (rp->status)
177 return rp->status;
178
179 hci_dev_lock(hdev);
180
181 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
182 if (conn)
183 conn->role = rp->role;
184
185 hci_dev_unlock(hdev);
186
187 return rp->status;
188 }
189
hci_cc_read_link_policy(struct hci_dev * hdev,void * data,struct sk_buff * skb)190 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 struct sk_buff *skb)
192 {
193 struct hci_rp_read_link_policy *rp = data;
194 struct hci_conn *conn;
195
196 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
197
198 if (rp->status)
199 return rp->status;
200
201 hci_dev_lock(hdev);
202
203 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
204 if (conn)
205 conn->link_policy = __le16_to_cpu(rp->policy);
206
207 hci_dev_unlock(hdev);
208
209 return rp->status;
210 }
211
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, mirrors the link policy taken from the command we
 * originally sent onto the matching connection object.
 */
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips what is presumably the 2-byte connection handle
		 * at the start of the command parameters — the policy
		 * field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
238
hci_cc_read_def_link_policy(struct hci_dev * hdev,void * data,struct sk_buff * skb)239 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
240 struct sk_buff *skb)
241 {
242 struct hci_rp_read_def_link_policy *rp = data;
243
244 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
245
246 if (rp->status)
247 return rp->status;
248
249 hdev->link_policy = __le16_to_cpu(rp->policy);
250
251 return rp->status;
252 }
253
hci_cc_write_def_link_policy(struct hci_dev * hdev,void * data,struct sk_buff * skb)254 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
255 struct sk_buff *skb)
256 {
257 struct hci_ev_status *rp = data;
258 void *sent;
259
260 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
261
262 if (rp->status)
263 return rp->status;
264
265 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
266 if (!sent)
267 return rp->status;
268
269 hdev->link_policy = get_unaligned_le16(sent);
270
271 return rp->status;
272 }
273
/* Command Complete handler for HCI_Reset.
 *
 * Always clears the in-progress HCI_RESET flag; on success it also
 * drops all volatile device state (flags, discovery state, advertising
 * and scan response data, LE accept/resolving lists) back to defaults.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The reset command has completed, whatever its status */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
308
/* Command Complete handler for HCI_OP_READ_STORED_LINK_KEY.
 *
 * Updates the cached key-capacity counters, but only when the command
 * we sent asked for all keys (read_all == 0x01) and succeeded.
 */
static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Need the original command parameters to see the read_all flag */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}
328
hci_cc_delete_stored_link_key(struct hci_dev * hdev,void * data,struct sk_buff * skb)329 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
330 struct sk_buff *skb)
331 {
332 struct hci_rp_delete_stored_link_key *rp = data;
333 u16 num_keys;
334
335 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
336
337 if (rp->status)
338 return rp->status;
339
340 num_keys = le16_to_cpu(rp->num_keys);
341
342 if (num_keys <= hdev->stored_num_keys)
343 hdev->stored_num_keys -= num_keys;
344 else
345 hdev->stored_num_keys = 0;
346
347 return rp->status;
348 }
349
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the management interface is active it is told the outcome
 * (success or failure); otherwise the local name cache is only
 * updated on success.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The name we attempted to write comes from the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}
373
hci_cc_read_local_name(struct hci_dev * hdev,void * data,struct sk_buff * skb)374 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
375 struct sk_buff *skb)
376 {
377 struct hci_rp_read_local_name *rp = data;
378
379 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
380
381 if (rp->status)
382 return rp->status;
383
384 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
385 hci_dev_test_flag(hdev, HCI_CONFIG))
386 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
387
388 return rp->status;
389 }
390
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirrors the requested authentication mode into the
 * HCI_AUTH device flag; mgmt (when active) is notified of the outcome
 * regardless of status.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		/* Single-byte command parameter: requested auth mode */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
421
hci_cc_write_encrypt_mode(struct hci_dev * hdev,void * data,struct sk_buff * skb)422 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
423 struct sk_buff *skb)
424 {
425 struct hci_ev_status *rp = data;
426 __u8 param;
427 void *sent;
428
429 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
430
431 if (rp->status)
432 return rp->status;
433
434 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
435 if (!sent)
436 return rp->status;
437
438 param = *((__u8 *) sent);
439
440 if (param)
441 set_bit(HCI_ENCRYPT, &hdev->flags);
442 else
443 clear_bit(HCI_ENCRYPT, &hdev->flags);
444
445 return rp->status;
446 }
447
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success, reflects the requested scan mode in the HCI_ISCAN
 * (inquiry scan) and HCI_PSCAN (page scan) flags; on failure the
 * discoverable timeout is reset instead.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	/* Single-byte command parameter: the requested scan mode */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		/* Command failed, so no discoverable window is running */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}
485
hci_cc_set_event_filter(struct hci_dev * hdev,void * data,struct sk_buff * skb)486 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
487 struct sk_buff *skb)
488 {
489 struct hci_ev_status *rp = data;
490 struct hci_cp_set_event_filter *cp;
491 void *sent;
492
493 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
494
495 if (rp->status)
496 return rp->status;
497
498 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
499 if (!sent)
500 return rp->status;
501
502 cp = (struct hci_cp_set_event_filter *)sent;
503
504 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
505 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506 else
507 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508
509 return rp->status;
510 }
511
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV.
 *
 * On success, caches the 3-byte class of device in @hdev.
 */
static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	/* Defensive: this handler must never run without a device */
	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Stored little-endian: print most significant byte first */
	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}
532
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, caches the class of device we just wrote; mgmt (when
 * active) is notified of the outcome regardless of status.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The class we attempted to write comes from the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
557
hci_cc_read_voice_setting(struct hci_dev * hdev,void * data,struct sk_buff * skb)558 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
559 struct sk_buff *skb)
560 {
561 struct hci_rp_read_voice_setting *rp = data;
562 __u16 setting;
563
564 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
565
566 if (rp->status)
567 return rp->status;
568
569 setting = __le16_to_cpu(rp->voice_setting);
570
571 if (hdev->voice_setting == setting)
572 return rp->status;
573
574 hdev->voice_setting = setting;
575
576 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
577
578 if (hdev->notify)
579 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
580
581 return rp->status;
582 }
583
hci_cc_write_voice_setting(struct hci_dev * hdev,void * data,struct sk_buff * skb)584 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
585 struct sk_buff *skb)
586 {
587 struct hci_ev_status *rp = data;
588 __u16 setting;
589 void *sent;
590
591 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
592
593 if (rp->status)
594 return rp->status;
595
596 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
597 if (!sent)
598 return rp->status;
599
600 setting = get_unaligned_le16(sent);
601
602 if (hdev->voice_setting == setting)
603 return rp->status;
604
605 hdev->voice_setting = setting;
606
607 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
608
609 if (hdev->notify)
610 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
611
612 return rp->status;
613 }
614
hci_cc_read_num_supported_iac(struct hci_dev * hdev,void * data,struct sk_buff * skb)615 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
616 struct sk_buff *skb)
617 {
618 struct hci_rp_read_num_supported_iac *rp = data;
619
620 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
621
622 if (rp->status)
623 return rp->status;
624
625 hdev->num_iac = rp->num_iac;
626
627 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
628
629 return rp->status;
630 }
631
hci_cc_write_ssp_mode(struct hci_dev * hdev,void * data,struct sk_buff * skb)632 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
633 struct sk_buff *skb)
634 {
635 struct hci_ev_status *rp = data;
636 struct hci_cp_write_ssp_mode *sent;
637
638 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
639
640 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
641 if (!sent)
642 return rp->status;
643
644 hci_dev_lock(hdev);
645
646 if (!rp->status) {
647 if (sent->mode)
648 hdev->features[1][0] |= LMP_HOST_SSP;
649 else
650 hdev->features[1][0] &= ~LMP_HOST_SSP;
651 }
652
653 if (!rp->status) {
654 if (sent->mode)
655 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
656 else
657 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
658 }
659
660 hci_dev_unlock(hdev);
661
662 return rp->status;
663 }
664
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * On success, mirrors the Secure Connections host support bit into the
 * local feature table. The HCI_SC_ENABLED flag is only touched here
 * when mgmt is NOT active (under mgmt the flag follows a different
 * path — NOTE(review): that path is outside this chunk).
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
697
hci_cc_read_local_version(struct hci_dev * hdev,void * data,struct sk_buff * skb)698 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
699 struct sk_buff *skb)
700 {
701 struct hci_rp_read_local_version *rp = data;
702
703 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
704
705 if (rp->status)
706 return rp->status;
707
708 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
709 hci_dev_test_flag(hdev, HCI_CONFIG)) {
710 hdev->hci_ver = rp->hci_ver;
711 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
712 hdev->lmp_ver = rp->lmp_ver;
713 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
714 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
715 }
716
717 return rp->status;
718 }
719
/* Command Complete handler for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Stores the negotiated encryption key size on the matching connection
 * and then runs the encryption-change confirmation. A failed read, or
 * a key below hdev->min_enc_key_size, is reported to the upper layers
 * as a security failure.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* Connection is gone: report a generic failure and skip
		 * the encrypt confirmation entirely.
		 */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* Tell the upper layers whether encryption is now usable */
	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
774
hci_cc_read_local_commands(struct hci_dev * hdev,void * data,struct sk_buff * skb)775 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
776 struct sk_buff *skb)
777 {
778 struct hci_rp_read_local_commands *rp = data;
779
780 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
781
782 if (rp->status)
783 return rp->status;
784
785 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
786 hci_dev_test_flag(hdev, HCI_CONFIG))
787 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
788
789 return rp->status;
790 }
791
hci_cc_read_auth_payload_timeout(struct hci_dev * hdev,void * data,struct sk_buff * skb)792 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
793 struct sk_buff *skb)
794 {
795 struct hci_rp_read_auth_payload_to *rp = data;
796 struct hci_conn *conn;
797
798 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
799
800 if (rp->status)
801 return rp->status;
802
803 hci_dev_lock(hdev);
804
805 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
806 if (conn)
807 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
808
809 hci_dev_unlock(hdev);
810
811 return rp->status;
812 }
813
/* Command Complete handler for HCI_OP_WRITE_AUTH_PAYLOAD_TO.
 *
 * On success, caches the authenticated payload timeout we sent on the
 * matching connection. NOTE: rp->status is rewritten to 0xff when the
 * connection has vanished, so the caller sees a failure.
 */
static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		/* +2 skips what is presumably the 2-byte connection handle
		 * at the start of the command parameters — the timeout
		 * field follows it.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
843
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches LMP feature page 0 and derives the supported ACL packet
 * types and (e)SCO link types from individual feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* HV2/HV3 enable both the SCO packet type and the eSCO variant */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types (2 Mb/s, 3 Mb/s, 3-slot) */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
895
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * Caches one page of extended LMP features and tracks the highest
 * feature page the controller claims to support.
 */
static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		/* Quirked controllers advertise a broken page 2; for those
		 * max_page is left untouched instead of being raised.
		 */
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	/* Bounds check before indexing the per-page feature table */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}
919
hci_cc_read_flow_control_mode(struct hci_dev * hdev,void * data,struct sk_buff * skb)920 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
921 struct sk_buff *skb)
922 {
923 struct hci_rp_read_flow_control_mode *rp = data;
924
925 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
926
927 if (rp->status)
928 return rp->status;
929
930 hdev->flow_ctl_mode = rp->mode;
931
932 return rp->status;
933 }
934
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts and seeds
 * the free-buffer counters. A zero ACL MTU or packet count is
 * rejected as invalid parameters.
 */
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirked controllers report unusable SCO sizing; substitute
	 * fixed values instead.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* All controller buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}
966
hci_cc_read_bd_addr(struct hci_dev * hdev,void * data,struct sk_buff * skb)967 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
968 struct sk_buff *skb)
969 {
970 struct hci_rp_read_bd_addr *rp = data;
971
972 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
973
974 if (rp->status)
975 return rp->status;
976
977 if (test_bit(HCI_INIT, &hdev->flags))
978 bacpy(&hdev->bdaddr, &rp->bdaddr);
979
980 if (hci_dev_test_flag(hdev, HCI_SETUP))
981 bacpy(&hdev->setup_addr, &rp->bdaddr);
982
983 return rp->status;
984 }
985
hci_cc_read_local_pairing_opts(struct hci_dev * hdev,void * data,struct sk_buff * skb)986 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
987 struct sk_buff *skb)
988 {
989 struct hci_rp_read_local_pairing_opts *rp = data;
990
991 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
992
993 if (rp->status)
994 return rp->status;
995
996 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
997 hci_dev_test_flag(hdev, HCI_CONFIG)) {
998 hdev->pairing_opts = rp->pairing_opts;
999 hdev->max_enc_key_size = rp->max_key_size;
1000 }
1001
1002 return rp->status;
1003 }
1004
hci_cc_read_page_scan_activity(struct hci_dev * hdev,void * data,struct sk_buff * skb)1005 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1006 struct sk_buff *skb)
1007 {
1008 struct hci_rp_read_page_scan_activity *rp = data;
1009
1010 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1011
1012 if (rp->status)
1013 return rp->status;
1014
1015 if (test_bit(HCI_INIT, &hdev->flags)) {
1016 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1017 hdev->page_scan_window = __le16_to_cpu(rp->window);
1018 }
1019
1020 return rp->status;
1021 }
1022
hci_cc_write_page_scan_activity(struct hci_dev * hdev,void * data,struct sk_buff * skb)1023 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1024 struct sk_buff *skb)
1025 {
1026 struct hci_ev_status *rp = data;
1027 struct hci_cp_write_page_scan_activity *sent;
1028
1029 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1030
1031 if (rp->status)
1032 return rp->status;
1033
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1035 if (!sent)
1036 return rp->status;
1037
1038 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1039 hdev->page_scan_window = __le16_to_cpu(sent->window);
1040
1041 return rp->status;
1042 }
1043
hci_cc_read_page_scan_type(struct hci_dev * hdev,void * data,struct sk_buff * skb)1044 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1045 struct sk_buff *skb)
1046 {
1047 struct hci_rp_read_page_scan_type *rp = data;
1048
1049 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1050
1051 if (rp->status)
1052 return rp->status;
1053
1054 if (test_bit(HCI_INIT, &hdev->flags))
1055 hdev->page_scan_type = rp->type;
1056
1057 return rp->status;
1058 }
1059
hci_cc_write_page_scan_type(struct hci_dev * hdev,void * data,struct sk_buff * skb)1060 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1061 struct sk_buff *skb)
1062 {
1063 struct hci_ev_status *rp = data;
1064 u8 *type;
1065
1066 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1067
1068 if (rp->status)
1069 return rp->status;
1070
1071 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1072 if (type)
1073 hdev->page_scan_type = *type;
1074
1075 return rp->status;
1076 }
1077
hci_cc_read_data_block_size(struct hci_dev * hdev,void * data,struct sk_buff * skb)1078 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1079 struct sk_buff *skb)
1080 {
1081 struct hci_rp_read_data_block_size *rp = data;
1082
1083 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1084
1085 if (rp->status)
1086 return rp->status;
1087
1088 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1089 hdev->block_len = __le16_to_cpu(rp->block_len);
1090 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1091
1092 hdev->block_cnt = hdev->num_blocks;
1093
1094 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1095 hdev->block_cnt, hdev->block_len);
1096
1097 return rp->status;
1098 }
1099
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on which clock the original command asked for (cp->which),
 * stores either the local controller clock in @hdev or the piconet
 * clock plus its accuracy on the matching connection.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	/* Need the original command to know which clock was requested */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 selects the local controller clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1133
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, cache all AMP controller capability fields on hdev.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}
1157
/* Command Complete handler for HCI_OP_READ_INQ_RSP_TX_POWER.
 *
 * On success, cache the inquiry response TX power level.
 */
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}
1172
/* Command Complete handler for HCI_OP_READ_DEF_ERR_DATA_REPORTING.
 *
 * On success, cache the erroneous data reporting setting from the reply.
 */
static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}
1187
/* Command Complete handler for HCI_OP_WRITE_DEF_ERR_DATA_REPORTING.
 *
 * On success, mirror the setting we wrote (taken from the sent command,
 * since the event carries only a status) into hdev->err_data_reporting.
 */
static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}
1207
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notifies mgmt of the reply result (regardless of status) and, on
 * success, records the PIN length on the matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* Inform the management interface even on failure */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1237
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 *
 * Only forwards the result to the management interface; no device state
 * is updated here.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1255
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * On success, cache the LE ACL MTU and packet count.  A non-zero MTU
 * below HCI_MIN_LE_MTU is rejected as invalid controller data.
 */
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE data packets start out available */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	/* Sanity check: a non-zero MTU must be at least HCI_MIN_LE_MTU */
	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}
1278
hci_cc_le_read_local_features(struct hci_dev * hdev,void * data,struct sk_buff * skb)1279 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1280 struct sk_buff *skb)
1281 {
1282 struct hci_rp_le_read_local_features *rp = data;
1283
1284 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1285
1286 if (rp->status)
1287 return rp->status;
1288
1289 memcpy(hdev->le_features, rp->features, 8);
1290
1291 return rp->status;
1292 }
1293
/* Command Complete handler for HCI_OP_LE_READ_ADV_TX_POWER.
 *
 * On success, cache the advertising TX power level.
 */
static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}
1308
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 *
 * Forwards the reply result (including failures) to the management
 * interface; no device state is updated here.
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1326
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 *
 * Forwards the negative-reply result to the management interface.
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1344
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 *
 * Forwards the passkey-reply result to the management interface.  Note
 * the reply shares the hci_rp_user_confirm_reply layout.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1362
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 *
 * Forwards the negative passkey-reply result to the management
 * interface.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1380
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Only logs and propagates the status; the OOB data itself is not
 * consumed here.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1390
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Only logs and propagates the status; the extended OOB data itself is
 * not consumed here.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1400
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, records the random address that was programmed (taken
 * from the sent command) and, if it matches the current RPA, restarts
 * the RPA expiry timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* The programmed address is our RPA: it is valid again, so clear
	 * the expired flag and schedule the next expiry.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1430
/* Command Complete handler for HCI_OP_LE_SET_DEFAULT_PHY.
 *
 * On success, mirror the default TX/RX PHY preferences we sent into
 * hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1455
/* Command Complete handler for HCI_OP_LE_SET_ADV_SET_RAND_ADDR.
 *
 * On success, store the per-instance random address on the matching
 * advertising instance and, if the address is the current RPA, restart
 * that instance's RPA expiry timer.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only in case the adv instance since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
	 * non-extended adverting.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* Address is the RPA: mark it fresh and re-arm expiry */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1493
/* Command Complete handler for HCI_OP_LE_REMOVE_ADV_SET.
 *
 * On success, remove the advertising instance named in the sent command
 * and notify mgmt that it is gone.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance number is only available in the sent command */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1521
/* Command Complete handler for HCI_OP_LE_CLEAR_ADV_SETS.
 *
 * On success, remove every advertising instance and notify mgmt for
 * each one removed.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Make sure the event matches a command we actually sent */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* _safe iteration: hci_remove_adv_instance unlinks entries */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1552
/* Command Complete handler for HCI_OP_LE_READ_TRANSMIT_POWER.
 *
 * On success, cache the controller's minimum and maximum LE TX power.
 */
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}
1568
/* Command Complete handler for HCI_OP_LE_SET_PRIVACY_MODE.
 *
 * On success, record the new privacy mode on the matching connection
 * parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		/* WRITE_ONCE: privacy_mode is presumably read without the
		 * lock elsewhere (lockless READ_ONCE readers) — verify.
		 */
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}
1595
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE (legacy
 * advertising).
 *
 * On success, set or clear HCI_LE_ADV to mirror the enable value we
 * sent.  When enabling while a peripheral connection attempt is
 * pending, arm that connection's timeout.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1634
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE (extended
 * advertising).
 *
 * On success, update per-instance enabled state and the global
 * HCI_LE_ADV flag.  HCI_LE_ADV is only cleared once no instance
 * remains enabled.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The variable-length part of the command holds the per-set
	 * entries; only the first set is inspected here.
	 */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* Periodic instances track enable state separately */
		if (adv && !adv->periodic)
			adv->enabled = true;

		/* Arm the connection timeout if a peripheral connection
		 * attempt is pending.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1699
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM (legacy
 * scanning).
 *
 * On success, mirror the scan type (active/passive) that was sent into
 * hdev->le_scan_type.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1723
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS (extended
 * scanning).
 *
 * On success, mirror the scan type from the first per-PHY parameter
 * block of the sent command into hdev->le_scan_type.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	/* Per-PHY parameters follow the fixed header; only the first
	 * entry's type is recorded.
	 */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1750
has_pending_adv_report(struct hci_dev * hdev)1751 static bool has_pending_adv_report(struct hci_dev *hdev)
1752 {
1753 struct discovery_state *d = &hdev->discovery;
1754
1755 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1756 }
1757
clear_pending_adv_report(struct hci_dev * hdev)1758 static void clear_pending_adv_report(struct hci_dev *hdev)
1759 {
1760 struct discovery_state *d = &hdev->discovery;
1761
1762 bacpy(&d->last_adv_addr, BDADDR_ANY);
1763 d->last_adv_data_len = 0;
1764 }
1765
/* Buffer an advertising report in the discovery state so it can be
 * merged with a later scan response before being reported to mgmt.
 * Reports longer than the controller's maximum adv length are dropped.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Guard the fixed-size last_adv_data buffer */
	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1782
/* Common completion logic for both legacy and extended LE scan enable
 * commands.
 *
 * Enable: set HCI_LE_SCAN, reset any buffered adv report for active
 * scans, and move mesh discovery to FINDING.
 * Disable: flush a buffered adv report to mgmt, cancel the scan-disable
 * timer, clear HCI_LE_SCAN and update discovery/advertising state.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge adv + scan-rsp; start fresh */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1838
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE (legacy).
 *
 * On success, forwards the enable value from the sent command to the
 * shared le_set_scan_enable_complete() logic.
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1858
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE (extended).
 *
 * On success, forwards the enable value from the sent command to the
 * shared le_set_scan_enable_complete() logic.
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1878
/* Command Complete handler for HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS.
 *
 * On success, cache the number of advertising sets the controller
 * supports.
 */
static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}
1894
/* Command Complete handler for HCI_OP_LE_READ_ACCEPT_LIST_SIZE.
 *
 * On success, cache the controller's accept (filter) list capacity.
 */
static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}
1909
/* Command Complete handler for HCI_OP_LE_CLEAR_ACCEPT_LIST.
 *
 * On success, drop the host's mirror of the controller's accept list.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1926
/* Command Complete handler for HCI_OP_LE_ADD_TO_ACCEPT_LIST.
 *
 * On success, mirror the added address (taken from the sent command)
 * into the host's copy of the accept list.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1949
/* Command Complete handler for HCI_OP_LE_DEL_FROM_ACCEPT_LIST.
 *
 * On success, remove the address (taken from the sent command) from the
 * host's copy of the accept list.
 */
static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1972
/* Command Complete handler for HCI_OP_LE_READ_SUPPORTED_STATES.
 *
 * On success, cache the 8-byte LE supported-states bitmap.
 */
static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}
1987
/* Command Complete handler for HCI_OP_LE_READ_DEF_DATA_LEN.
 *
 * On success, cache the suggested default TX octets and time.
 */
static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}
2003
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, mirror the default TX octets/time we sent (recovered from
 * the sent command) into hdev.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}
2024
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirror the added entry (address plus peer/local IRKs,
 * recovered from the sent command) into the host's resolving list.
 */
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}
2048
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, remove the matching entry from the host's resolving list.
 */
static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
2071
/* Command Complete handler for HCI_OP_LE_CLEAR_RESOLV_LIST.
 *
 * On success, drop the host's mirror of the controller's resolving
 * list.
 */
static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
2088
/* Command Complete handler for HCI_OP_LE_READ_RESOLV_LIST_SIZE.
 *
 * On success, cache the controller's resolving list capacity.
 */
static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}
2103
/* Command Complete handler for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * On success, set or clear HCI_LL_RPA_RESOLUTION to mirror the enable
 * value from the sent command.
 */
static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
2130
/* Command Complete handler for HCI_OP_LE_READ_MAX_DATA_LEN.
 *
 * On success, cache the maximum supported TX/RX octets and times.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}
2148
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, update the host feature bits (LMP_HOST_LE,
 * LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED/HCI_ADVERTISING flags to
 * match the values sent with the command.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* LE disabled implies advertising is off too */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2184
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM (legacy
 * advertising parameters).
 *
 * On success, mirror the own-address type we sent into
 * hdev->adv_addr_type.
 */
static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
2206
/* Command complete handler for HCI_LE_Set_Extended_Advertising_Parameters.
 *
 * Caches the own address type, stores the selected TX power either in
 * hdev (advertising handle 0) or in the matching advertising instance,
 * and then refreshes the advertising data now that the TX power is
 * known.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2240
hci_cc_read_rssi(struct hci_dev * hdev,void * data,struct sk_buff * skb)2241 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2242 struct sk_buff *skb)
2243 {
2244 struct hci_rp_read_rssi *rp = data;
2245 struct hci_conn *conn;
2246
2247 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2248
2249 if (rp->status)
2250 return rp->status;
2251
2252 hci_dev_lock(hdev);
2253
2254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2255 if (conn)
2256 conn->rssi = rp->rssi;
2257
2258 hci_dev_unlock(hdev);
2259
2260 return rp->status;
2261 }
2262
/* Command complete handler for HCI_Read_Transmit_Power_Level.
 *
 * Stores the reported power level on the matching connection: the
 * current level for type 0x00, the maximum level for type 0x01 (type
 * as passed in the original command).
 */
static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* Current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* Maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
2298
hci_cc_write_ssp_debug_mode(struct hci_dev * hdev,void * data,struct sk_buff * skb)2299 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2300 struct sk_buff *skb)
2301 {
2302 struct hci_ev_status *rp = data;
2303 u8 *mode;
2304
2305 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2306
2307 if (rp->status)
2308 return rp->status;
2309
2310 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2311 if (mode)
2312 hdev->ssp_debug_mode = *mode;
2313
2314 return rp->status;
2315 }
2316
/* Handle command status for HCI_Inquiry.
 *
 * On success mark inquiry as active; on failure let any pending
 * connection attempt proceed.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status) {
		/* Inquiry is now running; the flag is cleared again by
		 * the inquiry complete event handler.
		 */
		if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
			set_bit(HCI_INQUIRY, &hdev->flags);
		return;
	}

	hci_conn_check_pending(hdev);
}
2329
/* Handle command status for HCI_Create_Connection.
 *
 * On failure, either keep the connection around for a retry or tear
 * it down; on success, make sure a connection object exists to track
 * the outgoing ACL link until the connection complete event arrives.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is treated
			 * as transient: keep the connection in
			 * BT_CONNECT2 for up to two attempts before
			 * giving up and closing it.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* No connection object yet; add one so the
			 * connection complete event can be matched.
			 */
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}
2367
/* Handle command status for HCI_Add_SCO_Connection.
 *
 * Only failures are handled here: find the SCO link hanging off the
 * ACL connection identified by the command's handle and tear it down,
 * informing the upper layers of the failure status.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up on success */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the first link attached to this ACL */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2404
/* Handle command status for HCI_Authentication_Requested.
 *
 * Only failures need handling: if the link was still being
 * configured, report the failure to the upper layers and release the
 * reference held for configuration.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon && hcon->state == BT_CONFIG) {
		hci_connect_cfm(hcon, status);
		hci_conn_drop(hcon);
	}

	hci_dev_unlock(hdev);
}
2431
/* Handle command status for HCI_Set_Connection_Encryption.
 *
 * Only failures need handling: if the link was still being
 * configured, report the failure to the upper layers and release the
 * reference held for configuration.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon && hcon->state == BT_CONFIG) {
		hci_connect_cfm(hcon, status);
		hci_conn_drop(hcon);
	}

	hci_dev_unlock(hdev);
}
2458
hci_outgoing_auth_needed(struct hci_dev * hdev,struct hci_conn * conn)2459 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2460 struct hci_conn *conn)
2461 {
2462 if (conn->state != BT_CONFIG || !conn->out)
2463 return 0;
2464
2465 if (conn->pending_sec_level == BT_SECURITY_SDP)
2466 return 0;
2467
2468 /* Only request authentication for SSP connections or non-SSP
2469 * devices with sec_level MEDIUM or HIGH or if MITM protection
2470 * is requested.
2471 */
2472 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2473 conn->pending_sec_level != BT_SECURITY_FIPS &&
2474 conn->pending_sec_level != BT_SECURITY_HIGH &&
2475 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2476 return 0;
2477
2478 return 1;
2479 }
2480
/* Send HCI_Remote_Name_Request for the given inquiry cache entry,
 * reusing the page scan parameters learned during inquiry.
 * Returns the result of hci_send_cmd().
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
2495
/* Start name resolution for the next discovered device that still
 * needs its name.
 *
 * Returns true if a name request was sent (the entry is then marked
 * NAME_PENDING), false when there is nothing left to resolve or the
 * overall resolve time budget is exhausted.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names. */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
2521
/* Process the outcome of a remote name lookup for @bdaddr.
 *
 * @conn may be NULL; when present and actually connected it is used
 * to report the device as connected to mgmt.  @name is NULL when the
 * lookup failed.  Depending on the discovery state this reports the
 * name via mgmt, continues resolving further names, or marks the
 * discovery procedure as stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* A NULL name means the lookup failed for this device */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2565
/* Handle command status for HCI_Remote_Name_Request.
 *
 * Only failures are processed here: report the failed lookup to the
 * discovery machinery and, if the connection still needs it, fall
 * back to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name signals that the lookup failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2608
/* Handle command status for HCI_Read_Remote_Supported_Features.
 *
 * Only failures need handling: if the link was still being
 * configured, report the failure to the upper layers and release the
 * reference held for configuration.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon && hcon->state == BT_CONFIG) {
		hci_connect_cfm(hcon, status);
		hci_conn_drop(hcon);
	}

	hci_dev_unlock(hdev);
}
2635
/* Handle command status for HCI_Read_Remote_Extended_Features.
 *
 * Only failures need handling: if the link was still being
 * configured, report the failure to the upper layers and release the
 * reference held for configuration.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon && hcon->state == BT_CONFIG) {
		hci_connect_cfm(hcon, status);
		hci_conn_drop(hcon);
	}

	hci_dev_unlock(hdev);
}
2662
/* Common failure handling for the (enhanced) setup synchronous
 * connection commands: close and delete the SCO/eSCO connection
 * attached to the ACL link identified by @handle, informing the
 * upper layers of @status.
 */
static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the first link attached to this ACL */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2687
/* Handle command status for HCI_Setup_Synchronous_Connection.
 *
 * Failures are delegated to the common sync connection cleanup.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (sent)
		hci_setup_sync_conn_status(hdev, __le16_to_cpu(sent->handle),
					   status);
}
2703
/* Handle command status for HCI_Enhanced_Setup_Synchronous_Connection.
 *
 * Failures are delegated to the common sync connection cleanup.
 */
static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up unless the command failed */
	if (!status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (sent)
		hci_setup_sync_conn_status(hdev, __le16_to_cpu(sent->handle),
					   status);
}
2719
/* Handle command status for HCI_Sniff_Mode.
 *
 * On failure, clear the pending mode change and trigger any SCO
 * setup that was waiting for the mode change to complete.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon) {
		/* The mode change is no longer pending */
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &hcon->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &hcon->flags))
			hci_sco_setup(hcon, status);
	}

	hci_dev_unlock(hdev);
}
2746
/* Handle command status for HCI_Exit_Sniff_Mode.
 *
 * On failure, clear the pending mode change and trigger any SCO
 * setup that was waiting for the mode change to complete.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon) {
		/* The mode change is no longer pending */
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &hcon->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &hcon->flags))
			hci_sco_setup(hcon, status);
	}

	hci_dev_unlock(hdev);
}
2773
/* Handle command status for HCI_Disconnect.
 *
 * Normally cleanup happens in the disconnect complete event handler;
 * this path only acts when the command failed or when the device is
 * suspended (in which case the complete event may never be processed
 * before suspend).
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* Re-enable advertising for a failed slave LE disconnect */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connect parameters so the device can reconnect */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect after an actual link loss */
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2855
/* Normalize an event-reported LE address type.
 *
 * Controller-based address resolution reports the resolved variants
 * 0x02/0x03; map them back to public/random for the rest of the
 * stack.  @resolved (optional) is set to whether the address had
 * been resolved by the controller.
 */
static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
{
	bool was_resolved = false;
	u8 ret = type;

	switch (type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		was_resolved = true;
		ret = ADDR_LE_DEV_PUBLIC;
		break;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		was_resolved = true;
		ret = ADDR_LE_DEV_RANDOM;
		break;
	}

	if (resolved)
		*resolved = was_resolved;

	return ret;
}
2877
/* Record the address information of a successfully started LE
 * connection attempt on the matching connection object.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Normalize controller-resolved address types */
	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);
}
2904
/* Handle command status for HCI_LE_Create_Connection.
 *
 * Failures are handled by hci_conn_failed via the request completion
 * callbacks, so only success needs action here: record the address
 * information for SMP.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &sent->peer_addr, sent->peer_addr_type,
			  sent->own_address_type, sent->filter_policy);

	hci_dev_unlock(hdev);
}
2929
/* Handle command status for HCI_LE_Extended_Create_Connection.
 *
 * Failures are handled by hci_conn_failed via the request completion
 * callbacks, so only success needs action here: record the address
 * information for SMP.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &sent->peer_addr, sent->peer_addr_type,
			  sent->own_addr_type, sent->filter_policy);

	hci_dev_unlock(hdev);
}
2954
/* Handle command status for HCI_LE_Read_Remote_Features.
 *
 * Only failures need handling: if the link was still being
 * configured, report the failure to the upper layers and release the
 * reference held for configuration.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (hcon && hcon->state == BT_CONFIG) {
		hci_connect_cfm(hcon, status);
		hci_conn_drop(hcon);
	}

	hci_dev_unlock(hdev);
}
2981
/* Handle command status for HCI_LE_Start_Encryption.
 *
 * If the controller rejected the command while the link is still
 * connected, encryption cannot be started, so the link is torn down
 * with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only an established link needs to be disconnected */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3011
/* Handle command status for HCI_Switch_Role.
 *
 * On failure, clear the pending role-switch flag on the affected
 * connection so that a new role switch can be attempted later.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	/* Use bt_dev_dbg for consistency with the other command status
	 * handlers in this file (was a legacy BT_DBG call).
	 */
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
3034
/* Handle the HCI_Inquiry_Complete event.
 *
 * Clears the inquiry-active flag, wakes any waiters, and — when mgmt
 * is in control — either moves discovery on to name resolution or
 * marks it as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first device needing it */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3095
/* Handle the HCI_Inquiry_Result event.
 *
 * Validates the event length against the reported number of
 * responses, updates the inquiry cache with each response and
 * forwards it to mgmt as a found device (with RSSI unavailable).
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	/* Make sure the skb actually carries ev->num result records */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are not reported to mgmt */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP info */
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
3139
/* Handle the HCI_Connection_Complete event.
 *
 * Matches (or, for filtered auto-connects and legacy SCO, creates)
 * the connection object, assigns the connection handle, and performs
 * the post-connect setup: remote feature read, packet type, link key
 * and encryption bookkeeping.  Duplicate events for an already set up
 * connection are rejected to avoid state corruption.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (IS_ERR(conn)) {
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* Legacy SCO: the pending connection may have
			 * been created as eSCO; convert it.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Use a longer timeout when pairing is expected
			 * (incoming legacy link with no stored key).
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* "Link key request" completed ahead of "connect request" completes */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;
			struct hci_cp_read_enc_key_size cp;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);

				/* Fall back to the maximum key size when the
				 * controller cannot report it.
				 */
				if (!read_key_size_capable(hdev)) {
					conn->enc_key_size = HCI_LINK_KEY_SIZE;
				} else {
					cp.handle = cpu_to_le16(conn->handle);
					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
							 sizeof(cp), &cp)) {
						bt_dev_err(hdev, "sending read key size failed");
						conn->enc_key_size = HCI_LINK_KEY_SIZE;
					}
				}

				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		/* Notify the driver about CVSD air mode SCO links */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3295
/* Reject an incoming connection request from @bdaddr, reporting an
 * unacceptable BD_ADDR as the reason.
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	bacpy(&cp.bdaddr, bdaddr);

	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
3304
/* HCI Connection Request event: a remote device is requesting an
 * incoming ACL or SCO/eSCO connection. Decide whether to reject,
 * accept immediately, or defer the accept to the upper layers.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Give the registered protocols a chance to veto or defer the
	 * connection; they may also set HCI_PROTO_DEFER in @flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	/* Devices on the reject list are always refused */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the event data */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* ACL links are accepted right away; SCO is accepted the
		 * same way when eSCO is not supported and nobody deferred.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO capable and not deferred: accept as a synchronous
		 * connection with default bandwidth/latency parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Upper layer asked to defer: confirm now, accept later */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3412
/* Translate an HCI disconnect error code into the corresponding
 * mgmt disconnect reason; unrecognized codes map to "unknown".
 */
static u8 hci_to_mgmt_reason(u8 err)
{
	if (err == HCI_ERROR_CONNECTION_TIMEOUT)
		return MGMT_DEV_DISCONN_TIMEOUT;

	if (err == HCI_ERROR_REMOTE_USER_TERM ||
	    err == HCI_ERROR_REMOTE_LOW_RESOURCES ||
	    err == HCI_ERROR_REMOTE_POWER_OFF)
		return MGMT_DEV_DISCONN_REMOTE;

	if (err == HCI_ERROR_LOCAL_HOST_TERM)
		return MGMT_DEV_DISCONN_LOCAL_HOST;

	return MGMT_DEV_DISCONN_UNKNOWN;
}
3428
/* HCI Disconnection Complete event: tear down the hci_conn, notify
 * mgmt, re-arm auto-connect parameters and, for LE peripherals,
 * re-enable advertising.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* A non-zero status means the disconnect itself failed; the
	 * connection stays up, so only report the failure to mgmt.
	 */
	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A pending auth failure overrides the controller-supplied reason */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop the stored link key if it was marked non-persistent */
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue the device for passive-scan reconnection depending on
	 * its auto-connect policy.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect automatically on link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3513
/* HCI Authentication Complete event: record the authentication result
 * on the connection and continue with encryption setup when needed.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Authentication succeeded: promote the pending security
		 * level to the effective one.
		 */
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember a missing PIN/key so the disconnect reason can
		 * later be reported as an auth failure.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* With SSP, encryption must follow successful auth before
		 * the connection can be considered fully set up.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while auth was in flight, start it
	 * now (or fail it if authentication failed).
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3577
/* HCI Remote Name Request Complete event: forward the resolved name to
 * mgmt (if enabled) and kick off authentication for outgoing
 * connections that still need it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution is only reported via mgmt; without HCI_MGMT
	 * skip straight to the authentication check.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication once; AUTH_PEND guards against sending
	 * a duplicate HCI_Authentication_Requested.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3618
/* HCI Encryption Change event: update the connection's security flags,
 * verify the link security requirements, read the encryption key size
 * for ACL links and notify the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!read_key_size_capable(hdev)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* The key-size command completion will do the notify */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3730
hci_change_link_key_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)3731 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3732 struct sk_buff *skb)
3733 {
3734 struct hci_ev_change_link_key_complete *ev = data;
3735 struct hci_conn *conn;
3736
3737 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3738
3739 hci_dev_lock(hdev);
3740
3741 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3742 if (conn) {
3743 if (!ev->status)
3744 set_bit(HCI_CONN_SECURE, &conn->flags);
3745
3746 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3747
3748 hci_key_change_cfm(conn, ev->status);
3749 }
3750
3751 hci_dev_unlock(hdev);
3752 }
3753
/* HCI Read Remote Supported Features Complete event: store the remote
 * feature page 0 and continue connection setup (extended features,
 * remote name, authentication) while in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 next and
	 * let its completion continue the setup.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3803
/* Update the command credit count and (re)arm the command timers based
 * on the Num_HCI_Command_Packets value (@ncmd) reported by the
 * controller in a Command Complete/Status event.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	/* A response arrived, so the per-command timeout no longer applies */
	cancel_delayed_work(&hdev->cmd_timer);

	rcu_read_lock();
	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			/* Controller can accept commands again */
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			/* ncmd == 0: controller is busy; start the ncmd
			 * watchdog unless the queue is being drained.
			 */
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
						   HCI_NCMD_TIMEOUT);
		}
	}
	rcu_read_unlock();
}
3821
hci_cc_le_read_buffer_size_v2(struct hci_dev * hdev,void * data,struct sk_buff * skb)3822 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3823 struct sk_buff *skb)
3824 {
3825 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3826
3827 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3828
3829 if (rp->status)
3830 return rp->status;
3831
3832 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3833 hdev->le_pkts = rp->acl_max_pkt;
3834 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3835 hdev->iso_pkts = rp->iso_max_pkt;
3836
3837 hdev->le_cnt = hdev->le_pkts;
3838 hdev->iso_cnt = hdev->iso_pkts;
3839
3840 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3841 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3842
3843 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3844 return HCI_ERROR_INVALID_PARAMETERS;
3845
3846 return rp->status;
3847 }
3848
/* Fail every CIS of CIG @cig that never got a connection handle
 * assigned. Bound CIS (dst == BDADDR_ANY) and connections still in
 * BT_OPEN are left untouched. Caller must hold hdev->lock.
 */
static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
{
	struct hci_conn *conn, *tmp;

	lockdep_assert_held(&hdev->lock);

	/* _safe variant: hci_conn_failed() may remove entries while
	 * iterating.
	 */
	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
			continue;

		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, status);
	}
}
3864
/* Command Complete handler for HCI_OP_LE_SET_CIG_PARAMS: validate the
 * response against the sent command, assign the returned connection
 * handles to the matching CIS connections, and fail unbound CIS on
 * error. Returns the controller status.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Sanity-check the response against the command we actually sent;
	 * treat a mismatch as an unspecified error.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		/* A CIS already in BT_CONNECT still needs Create CIS sent */
		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
3929
/* Command Complete handler for HCI_OP_LE_SETUP_ISO_PATH: confirm (or
 * tear down) the ISO connection whose data path was just configured,
 * depending on the path direction and the command status.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Need the sent command to recover the connection handle */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Path setup failed: report and drop the connection */
	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);

		/* Notify device connected in case it is a BIG Sync */
		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
			mgmt_device_connected(hdev, conn, NULL, 0);

		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
3980
/* Command Status handler for HCI_OP_LE_CREATE_BIG: nothing to do here
 * beyond logging; the LE BIG Complete event carries the real result.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3985
hci_cc_set_per_adv_param(struct hci_dev * hdev,void * data,struct sk_buff * skb)3986 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3987 struct sk_buff *skb)
3988 {
3989 struct hci_ev_status *rp = data;
3990 struct hci_cp_le_set_per_adv_params *cp;
3991
3992 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3993
3994 if (rp->status)
3995 return rp->status;
3996
3997 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3998 if (!cp)
3999 return rp->status;
4000
4001 /* TODO: set the conn state */
4002 return rp->status;
4003 }
4004
/* Command Complete handler for HCI_OP_LE_SET_PER_ADV_ENABLE: keep the
 * HCI_LE_PER_ADV device flag and the per-instance enabled state in
 * sync with the controller.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		/* More than one instance still enabled: keep the flag set */
		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4054
/* Helpers for declaring entries of the Command Complete dispatch table
 * below: bind an HCI opcode to its handler together with the minimum
 * and maximum acceptable response lengths.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* Fixed-length response: min and max are the same */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* Status-only response: just a single status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4068
/* Command Complete dispatch table: maps each HCI opcode to its handler
 * and the valid response length range. Lengths are validated by
 * hci_cc_func() before the handler is invoked.
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	/* Variable length: the response carries one handle per CIS */
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4243
hci_cc_func(struct hci_dev * hdev,const struct hci_cc * cc,struct sk_buff * skb)4244 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4245 struct sk_buff *skb)
4246 {
4247 void *data;
4248
4249 if (skb->len < cc->min_len) {
4250 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4251 cc->op, skb->len, cc->min_len);
4252 return HCI_ERROR_UNSPECIFIED;
4253 }
4254
4255 /* Just warn if the length is over max_len size it still be possible to
4256 * partially parse the cc so leave to callback to decide if that is
4257 * acceptable.
4258 */
4259 if (skb->len > cc->max_len)
4260 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4261 cc->op, skb->len, cc->max_len);
4262
4263 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4264 if (!data)
4265 return HCI_ERROR_UNSPECIFIED;
4266
4267 return cc->func(hdev, data, skb);
4268 }
4269
hci_cmd_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb,u16 * opcode,u8 * status,hci_req_complete_t * req_complete,hci_req_complete_skb_t * req_complete_skb)4270 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4271 struct sk_buff *skb, u16 *opcode, u8 *status,
4272 hci_req_complete_t *req_complete,
4273 hci_req_complete_skb_t *req_complete_skb)
4274 {
4275 struct hci_ev_cmd_complete *ev = data;
4276 int i;
4277
4278 *opcode = __le16_to_cpu(ev->opcode);
4279
4280 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4281
4282 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4283 if (hci_cc_table[i].op == *opcode) {
4284 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4285 break;
4286 }
4287 }
4288
4289 if (i == ARRAY_SIZE(hci_cc_table)) {
4290 /* Unknown opcode, assume byte 0 contains the status, so
4291 * that e.g. __hci_cmd_sync() properly returns errors
4292 * for vendor specific commands send by HCI drivers.
4293 * If a vendor doesn't actually follow this convention we may
4294 * need to introduce a vendor CC table in order to properly set
4295 * the status.
4296 */
4297 *status = skb->data[0];
4298 }
4299
4300 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4301
4302 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4303 req_complete_skb);
4304
4305 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4306 bt_dev_err(hdev,
4307 "unexpected event for opcode 0x%4.4x", *opcode);
4308 return;
4309 }
4310
4311 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4312 queue_work(hdev->workqueue, &hdev->cmd_work);
4313 }
4314
hci_cs_le_create_cis(struct hci_dev * hdev,u8 status)4315 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4316 {
4317 struct hci_cp_le_create_cis *cp;
4318 bool pending = false;
4319 int i;
4320
4321 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4322
4323 if (!status)
4324 return;
4325
4326 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4327 if (!cp)
4328 return;
4329
4330 hci_dev_lock(hdev);
4331
4332 /* Remove connection if command failed */
4333 for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4334 struct hci_conn *conn;
4335 u16 handle;
4336
4337 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4338
4339 conn = hci_conn_hash_lookup_handle(hdev, handle);
4340 if (conn) {
4341 if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4342 &conn->flags))
4343 pending = true;
4344 conn->state = BT_CLOSED;
4345 hci_connect_cfm(conn, status);
4346 hci_conn_del(conn);
4347 }
4348 }
4349
4350 if (pending)
4351 hci_le_create_cis_pending(hdev);
4352
4353 hci_dev_unlock(hdev);
4354 }
4355
/* Helper to declare one Command Status table entry: maps an HCI opcode
 * to the handler invoked when a Command Status event for it arrives.
 */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table for Command Status events: commands whose outcome is
 * reported asynchronously (via a later event) rather than through a
 * Command Complete.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4389
hci_cmd_status_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb,u16 * opcode,u8 * status,hci_req_complete_t * req_complete,hci_req_complete_skb_t * req_complete_skb)4390 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4391 struct sk_buff *skb, u16 *opcode, u8 *status,
4392 hci_req_complete_t *req_complete,
4393 hci_req_complete_skb_t *req_complete_skb)
4394 {
4395 struct hci_ev_cmd_status *ev = data;
4396 int i;
4397
4398 *opcode = __le16_to_cpu(ev->opcode);
4399 *status = ev->status;
4400
4401 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4402
4403 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4404 if (hci_cs_table[i].op == *opcode) {
4405 hci_cs_table[i].func(hdev, ev->status);
4406 break;
4407 }
4408 }
4409
4410 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4411
4412 /* Indicate request completion if the command failed. Also, if
4413 * we're not waiting for a special event and we get a success
4414 * command status we should try to flag the request as completed
4415 * (since for this kind of commands there will not be a command
4416 * complete event).
4417 */
4418 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4419 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4420 req_complete_skb);
4421 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4422 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4423 *opcode);
4424 return;
4425 }
4426 }
4427
4428 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4429 queue_work(hdev->workqueue, &hdev->cmd_work);
4430 }
4431
hci_hardware_error_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4432 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4433 struct sk_buff *skb)
4434 {
4435 struct hci_ev_hardware_error *ev = data;
4436
4437 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4438
4439 hdev->hw_error_code = ev->code;
4440
4441 queue_work(hdev->req_workqueue, &hdev->error_reset);
4442 }
4443
hci_role_change_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4444 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4445 struct sk_buff *skb)
4446 {
4447 struct hci_ev_role_change *ev = data;
4448 struct hci_conn *conn;
4449
4450 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4451
4452 hci_dev_lock(hdev);
4453
4454 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4455 if (conn) {
4456 if (!ev->status)
4457 conn->role = ev->role;
4458
4459 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4460
4461 hci_role_switch_cfm(conn, ev->status, ev->role);
4462 }
4463
4464 hci_dev_unlock(hdev);
4465 }
4466
hci_num_comp_pkts_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4467 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4468 struct sk_buff *skb)
4469 {
4470 struct hci_ev_num_comp_pkts *ev = data;
4471 int i;
4472
4473 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4474 flex_array_size(ev, handles, ev->num)))
4475 return;
4476
4477 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4478 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4479 return;
4480 }
4481
4482 bt_dev_dbg(hdev, "num %d", ev->num);
4483
4484 for (i = 0; i < ev->num; i++) {
4485 struct hci_comp_pkts_info *info = &ev->handles[i];
4486 struct hci_conn *conn;
4487 __u16 handle, count;
4488
4489 handle = __le16_to_cpu(info->handle);
4490 count = __le16_to_cpu(info->count);
4491
4492 conn = hci_conn_hash_lookup_handle(hdev, handle);
4493 if (!conn)
4494 continue;
4495
4496 conn->sent -= count;
4497
4498 switch (conn->type) {
4499 case ACL_LINK:
4500 hdev->acl_cnt += count;
4501 if (hdev->acl_cnt > hdev->acl_pkts)
4502 hdev->acl_cnt = hdev->acl_pkts;
4503 break;
4504
4505 case LE_LINK:
4506 if (hdev->le_pkts) {
4507 hdev->le_cnt += count;
4508 if (hdev->le_cnt > hdev->le_pkts)
4509 hdev->le_cnt = hdev->le_pkts;
4510 } else {
4511 hdev->acl_cnt += count;
4512 if (hdev->acl_cnt > hdev->acl_pkts)
4513 hdev->acl_cnt = hdev->acl_pkts;
4514 }
4515 break;
4516
4517 case SCO_LINK:
4518 hdev->sco_cnt += count;
4519 if (hdev->sco_cnt > hdev->sco_pkts)
4520 hdev->sco_cnt = hdev->sco_pkts;
4521 break;
4522
4523 case ISO_LINK:
4524 if (hdev->iso_pkts) {
4525 hdev->iso_cnt += count;
4526 if (hdev->iso_cnt > hdev->iso_pkts)
4527 hdev->iso_cnt = hdev->iso_pkts;
4528 } else if (hdev->le_pkts) {
4529 hdev->le_cnt += count;
4530 if (hdev->le_cnt > hdev->le_pkts)
4531 hdev->le_cnt = hdev->le_pkts;
4532 } else {
4533 hdev->acl_cnt += count;
4534 if (hdev->acl_cnt > hdev->acl_pkts)
4535 hdev->acl_cnt = hdev->acl_pkts;
4536 }
4537 break;
4538
4539 default:
4540 bt_dev_err(hdev, "unknown type %d conn %p",
4541 conn->type, conn);
4542 break;
4543 }
4544 }
4545
4546 queue_work(hdev->workqueue, &hdev->tx_work);
4547 }
4548
__hci_conn_lookup_handle(struct hci_dev * hdev,__u16 handle)4549 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4550 __u16 handle)
4551 {
4552 struct hci_chan *chan;
4553
4554 switch (hdev->dev_type) {
4555 case HCI_PRIMARY:
4556 return hci_conn_hash_lookup_handle(hdev, handle);
4557 case HCI_AMP:
4558 chan = hci_chan_lookup_handle(hdev, handle);
4559 if (chan)
4560 return chan->conn;
4561 break;
4562 default:
4563 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4564 break;
4565 }
4566
4567 return NULL;
4568 }
4569
hci_num_comp_blocks_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4570 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4571 struct sk_buff *skb)
4572 {
4573 struct hci_ev_num_comp_blocks *ev = data;
4574 int i;
4575
4576 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4577 flex_array_size(ev, handles, ev->num_hndl)))
4578 return;
4579
4580 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4581 bt_dev_err(hdev, "wrong event for mode %d",
4582 hdev->flow_ctl_mode);
4583 return;
4584 }
4585
4586 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4587 ev->num_hndl);
4588
4589 for (i = 0; i < ev->num_hndl; i++) {
4590 struct hci_comp_blocks_info *info = &ev->handles[i];
4591 struct hci_conn *conn = NULL;
4592 __u16 handle, block_count;
4593
4594 handle = __le16_to_cpu(info->handle);
4595 block_count = __le16_to_cpu(info->blocks);
4596
4597 conn = __hci_conn_lookup_handle(hdev, handle);
4598 if (!conn)
4599 continue;
4600
4601 conn->sent -= block_count;
4602
4603 switch (conn->type) {
4604 case ACL_LINK:
4605 case AMP_LINK:
4606 hdev->block_cnt += block_count;
4607 if (hdev->block_cnt > hdev->num_blocks)
4608 hdev->block_cnt = hdev->num_blocks;
4609 break;
4610
4611 default:
4612 bt_dev_err(hdev, "unknown type %d conn %p",
4613 conn->type, conn);
4614 break;
4615 }
4616 }
4617
4618 queue_work(hdev->workqueue, &hdev->tx_work);
4619 }
4620
hci_mode_change_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4621 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4622 struct sk_buff *skb)
4623 {
4624 struct hci_ev_mode_change *ev = data;
4625 struct hci_conn *conn;
4626
4627 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4628
4629 hci_dev_lock(hdev);
4630
4631 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4632 if (conn) {
4633 conn->mode = ev->mode;
4634
4635 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4636 &conn->flags)) {
4637 if (conn->mode == HCI_CM_ACTIVE)
4638 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4639 else
4640 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4641 }
4642
4643 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4644 hci_sco_setup(conn, ev->status);
4645 }
4646
4647 hci_dev_unlock(hdev);
4648 }
4649
hci_pin_code_request_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4650 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4651 struct sk_buff *skb)
4652 {
4653 struct hci_ev_pin_code_req *ev = data;
4654 struct hci_conn *conn;
4655
4656 bt_dev_dbg(hdev, "");
4657
4658 hci_dev_lock(hdev);
4659
4660 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4661 if (!conn)
4662 goto unlock;
4663
4664 if (conn->state == BT_CONNECTED) {
4665 hci_conn_hold(conn);
4666 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4667 hci_conn_drop(conn);
4668 }
4669
4670 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4671 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4672 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4673 sizeof(ev->bdaddr), &ev->bdaddr);
4674 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4675 u8 secure;
4676
4677 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4678 secure = 1;
4679 else
4680 secure = 0;
4681
4682 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4683 }
4684
4685 unlock:
4686 hci_dev_unlock(hdev);
4687 }
4688
conn_set_key(struct hci_conn * conn,u8 key_type,u8 pin_len)4689 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4690 {
4691 if (key_type == HCI_LK_CHANGED_COMBINATION)
4692 return;
4693
4694 conn->pin_length = pin_len;
4695 conn->key_type = key_type;
4696
4697 switch (key_type) {
4698 case HCI_LK_LOCAL_UNIT:
4699 case HCI_LK_REMOTE_UNIT:
4700 case HCI_LK_DEBUG_COMBINATION:
4701 return;
4702 case HCI_LK_COMBINATION:
4703 if (pin_len == 16)
4704 conn->pending_sec_level = BT_SECURITY_HIGH;
4705 else
4706 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4707 break;
4708 case HCI_LK_UNAUTH_COMBINATION_P192:
4709 case HCI_LK_UNAUTH_COMBINATION_P256:
4710 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4711 break;
4712 case HCI_LK_AUTH_COMBINATION_P192:
4713 conn->pending_sec_level = BT_SECURITY_HIGH;
4714 break;
4715 case HCI_LK_AUTH_COMBINATION_P256:
4716 conn->pending_sec_level = BT_SECURITY_FIPS;
4717 break;
4718 }
4719 }
4720
hci_link_key_request_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4721 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4722 struct sk_buff *skb)
4723 {
4724 struct hci_ev_link_key_req *ev = data;
4725 struct hci_cp_link_key_reply cp;
4726 struct hci_conn *conn;
4727 struct link_key *key;
4728
4729 bt_dev_dbg(hdev, "");
4730
4731 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4732 return;
4733
4734 hci_dev_lock(hdev);
4735
4736 key = hci_find_link_key(hdev, &ev->bdaddr);
4737 if (!key) {
4738 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4739 goto not_found;
4740 }
4741
4742 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4743
4744 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4745 if (conn) {
4746 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4747
4748 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4749 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4750 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4751 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4752 goto not_found;
4753 }
4754
4755 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4756 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4757 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4758 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4759 goto not_found;
4760 }
4761
4762 conn_set_key(conn, key->type, key->pin_len);
4763 }
4764
4765 bacpy(&cp.bdaddr, &ev->bdaddr);
4766 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4767
4768 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4769
4770 hci_dev_unlock(hdev);
4771
4772 return;
4773
4774 not_found:
4775 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4776 hci_dev_unlock(hdev);
4777 }
4778
hci_link_key_notify_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4779 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4780 struct sk_buff *skb)
4781 {
4782 struct hci_ev_link_key_notify *ev = data;
4783 struct hci_conn *conn;
4784 struct link_key *key;
4785 bool persistent;
4786 u8 pin_len = 0;
4787
4788 bt_dev_dbg(hdev, "");
4789
4790 hci_dev_lock(hdev);
4791
4792 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4793 if (!conn)
4794 goto unlock;
4795
4796 /* Ignore NULL link key against CVE-2020-26555 */
4797 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4798 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4799 &ev->bdaddr);
4800 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4801 hci_conn_drop(conn);
4802 goto unlock;
4803 }
4804
4805 hci_conn_hold(conn);
4806 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4807 hci_conn_drop(conn);
4808
4809 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4810 conn_set_key(conn, ev->key_type, conn->pin_length);
4811
4812 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4813 goto unlock;
4814
4815 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4816 ev->key_type, pin_len, &persistent);
4817 if (!key)
4818 goto unlock;
4819
4820 /* Update connection information since adding the key will have
4821 * fixed up the type in the case of changed combination keys.
4822 */
4823 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4824 conn_set_key(conn, key->type, key->pin_len);
4825
4826 mgmt_new_link_key(hdev, key, persistent);
4827
4828 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4829 * is set. If it's not set simply remove the key from the kernel
4830 * list (we've still notified user space about it but with
4831 * store_hint being 0).
4832 */
4833 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4834 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4835 list_del_rcu(&key->list);
4836 kfree_rcu(key, rcu);
4837 goto unlock;
4838 }
4839
4840 if (persistent)
4841 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4842 else
4843 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4844
4845 unlock:
4846 hci_dev_unlock(hdev);
4847 }
4848
hci_clock_offset_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4849 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4850 struct sk_buff *skb)
4851 {
4852 struct hci_ev_clock_offset *ev = data;
4853 struct hci_conn *conn;
4854
4855 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4856
4857 hci_dev_lock(hdev);
4858
4859 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4860 if (conn && !ev->status) {
4861 struct inquiry_entry *ie;
4862
4863 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4864 if (ie) {
4865 ie->data.clock_offset = ev->clock_offset;
4866 ie->timestamp = jiffies;
4867 }
4868 }
4869
4870 hci_dev_unlock(hdev);
4871 }
4872
hci_pkt_type_change_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4873 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4874 struct sk_buff *skb)
4875 {
4876 struct hci_ev_pkt_type_change *ev = data;
4877 struct hci_conn *conn;
4878
4879 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4880
4881 hci_dev_lock(hdev);
4882
4883 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4884 if (conn && !ev->status)
4885 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4886
4887 hci_dev_unlock(hdev);
4888 }
4889
hci_pscan_rep_mode_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4890 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4891 struct sk_buff *skb)
4892 {
4893 struct hci_ev_pscan_rep_mode *ev = data;
4894 struct inquiry_entry *ie;
4895
4896 bt_dev_dbg(hdev, "");
4897
4898 hci_dev_lock(hdev);
4899
4900 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4901 if (ie) {
4902 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4903 ie->timestamp = jiffies;
4904 }
4905
4906 hci_dev_unlock(hdev);
4907 }
4908
hci_inquiry_result_with_rssi_evt(struct hci_dev * hdev,void * edata,struct sk_buff * skb)4909 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4910 struct sk_buff *skb)
4911 {
4912 struct hci_ev_inquiry_result_rssi *ev = edata;
4913 struct inquiry_data data;
4914 int i;
4915
4916 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4917
4918 if (!ev->num)
4919 return;
4920
4921 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4922 return;
4923
4924 hci_dev_lock(hdev);
4925
4926 if (skb->len == array_size(ev->num,
4927 sizeof(struct inquiry_info_rssi_pscan))) {
4928 struct inquiry_info_rssi_pscan *info;
4929
4930 for (i = 0; i < ev->num; i++) {
4931 u32 flags;
4932
4933 info = hci_ev_skb_pull(hdev, skb,
4934 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4935 sizeof(*info));
4936 if (!info) {
4937 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4938 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4939 goto unlock;
4940 }
4941
4942 bacpy(&data.bdaddr, &info->bdaddr);
4943 data.pscan_rep_mode = info->pscan_rep_mode;
4944 data.pscan_period_mode = info->pscan_period_mode;
4945 data.pscan_mode = info->pscan_mode;
4946 memcpy(data.dev_class, info->dev_class, 3);
4947 data.clock_offset = info->clock_offset;
4948 data.rssi = info->rssi;
4949 data.ssp_mode = 0x00;
4950
4951 flags = hci_inquiry_cache_update(hdev, &data, false);
4952
4953 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4954 info->dev_class, info->rssi,
4955 flags, NULL, 0, NULL, 0, 0);
4956 }
4957 } else if (skb->len == array_size(ev->num,
4958 sizeof(struct inquiry_info_rssi))) {
4959 struct inquiry_info_rssi *info;
4960
4961 for (i = 0; i < ev->num; i++) {
4962 u32 flags;
4963
4964 info = hci_ev_skb_pull(hdev, skb,
4965 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4966 sizeof(*info));
4967 if (!info) {
4968 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4969 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4970 goto unlock;
4971 }
4972
4973 bacpy(&data.bdaddr, &info->bdaddr);
4974 data.pscan_rep_mode = info->pscan_rep_mode;
4975 data.pscan_period_mode = info->pscan_period_mode;
4976 data.pscan_mode = 0x00;
4977 memcpy(data.dev_class, info->dev_class, 3);
4978 data.clock_offset = info->clock_offset;
4979 data.rssi = info->rssi;
4980 data.ssp_mode = 0x00;
4981
4982 flags = hci_inquiry_cache_update(hdev, &data, false);
4983
4984 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4985 info->dev_class, info->rssi,
4986 flags, NULL, 0, NULL, 0, 0);
4987 }
4988 } else {
4989 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4990 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4991 }
4992 unlock:
4993 hci_dev_unlock(hdev);
4994 }
4995
hci_remote_ext_features_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)4996 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4997 struct sk_buff *skb)
4998 {
4999 struct hci_ev_remote_ext_features *ev = data;
5000 struct hci_conn *conn;
5001
5002 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5003
5004 hci_dev_lock(hdev);
5005
5006 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5007 if (!conn)
5008 goto unlock;
5009
5010 if (ev->page < HCI_MAX_PAGES)
5011 memcpy(conn->features[ev->page], ev->features, 8);
5012
5013 if (!ev->status && ev->page == 0x01) {
5014 struct inquiry_entry *ie;
5015
5016 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5017 if (ie)
5018 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5019
5020 if (ev->features[0] & LMP_HOST_SSP) {
5021 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5022 } else {
5023 /* It is mandatory by the Bluetooth specification that
5024 * Extended Inquiry Results are only used when Secure
5025 * Simple Pairing is enabled, but some devices violate
5026 * this.
5027 *
5028 * To make these devices work, the internal SSP
5029 * enabled flag needs to be cleared if the remote host
5030 * features do not indicate SSP support */
5031 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5032 }
5033
5034 if (ev->features[0] & LMP_HOST_SC)
5035 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5036 }
5037
5038 if (conn->state != BT_CONFIG)
5039 goto unlock;
5040
5041 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5042 struct hci_cp_remote_name_req cp;
5043 memset(&cp, 0, sizeof(cp));
5044 bacpy(&cp.bdaddr, &conn->dst);
5045 cp.pscan_rep_mode = 0x02;
5046 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5047 } else {
5048 mgmt_device_connected(hdev, conn, NULL, 0);
5049 }
5050
5051 if (!hci_outgoing_auth_needed(hdev, conn)) {
5052 conn->state = BT_CONNECTED;
5053 hci_connect_cfm(conn, ev->status);
5054 hci_conn_drop(conn);
5055 }
5056
5057 unlock:
5058 hci_dev_unlock(hdev);
5059 }
5060
hci_sync_conn_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5061 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5062 struct sk_buff *skb)
5063 {
5064 struct hci_ev_sync_conn_complete *ev = data;
5065 struct hci_conn *conn;
5066 u8 status = ev->status;
5067
5068 switch (ev->link_type) {
5069 case SCO_LINK:
5070 case ESCO_LINK:
5071 break;
5072 default:
5073 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5074 * for HCI_Synchronous_Connection_Complete is limited to
5075 * either SCO or eSCO
5076 */
5077 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5078 return;
5079 }
5080
5081 bt_dev_dbg(hdev, "status 0x%2.2x", status);
5082
5083 hci_dev_lock(hdev);
5084
5085 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5086 if (!conn) {
5087 if (ev->link_type == ESCO_LINK)
5088 goto unlock;
5089
5090 /* When the link type in the event indicates SCO connection
5091 * and lookup of the connection object fails, then check
5092 * if an eSCO connection object exists.
5093 *
5094 * The core limits the synchronous connections to either
5095 * SCO or eSCO. The eSCO connection is preferred and tried
5096 * to be setup first and until successfully established,
5097 * the link type will be hinted as eSCO.
5098 */
5099 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5100 if (!conn)
5101 goto unlock;
5102 }
5103
5104 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5105 * Processing it more than once per connection can corrupt kernel memory.
5106 *
5107 * As the connection handle is set here for the first time, it indicates
5108 * whether the connection is already set up.
5109 */
5110 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5111 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5112 goto unlock;
5113 }
5114
5115 switch (status) {
5116 case 0x00:
5117 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5118 if (status) {
5119 conn->state = BT_CLOSED;
5120 break;
5121 }
5122
5123 conn->state = BT_CONNECTED;
5124 conn->type = ev->link_type;
5125
5126 hci_debugfs_create_conn(conn);
5127 hci_conn_add_sysfs(conn);
5128 break;
5129
5130 case 0x10: /* Connection Accept Timeout */
5131 case 0x0d: /* Connection Rejected due to Limited Resources */
5132 case 0x11: /* Unsupported Feature or Parameter Value */
5133 case 0x1c: /* SCO interval rejected */
5134 case 0x1a: /* Unsupported Remote Feature */
5135 case 0x1e: /* Invalid LMP Parameters */
5136 case 0x1f: /* Unspecified error */
5137 case 0x20: /* Unsupported LMP Parameter value */
5138 if (conn->out) {
5139 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5140 (hdev->esco_type & EDR_ESCO_MASK);
5141 if (hci_setup_sync(conn, conn->parent->handle))
5142 goto unlock;
5143 }
5144 fallthrough;
5145
5146 default:
5147 conn->state = BT_CLOSED;
5148 break;
5149 }
5150
5151 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5152 /* Notify only in case of SCO over HCI transport data path which
5153 * is zero and non-zero value shall be non-HCI transport data path
5154 */
5155 if (conn->codec.data_path == 0 && hdev->notify) {
5156 switch (ev->air_mode) {
5157 case 0x02:
5158 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5159 break;
5160 case 0x03:
5161 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5162 break;
5163 }
5164 }
5165
5166 hci_connect_cfm(conn, status);
5167 if (status)
5168 hci_conn_del(conn);
5169
5170 unlock:
5171 hci_dev_unlock(hdev);
5172 }
5173
eir_get_length(u8 * eir,size_t eir_len)5174 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5175 {
5176 size_t parsed = 0;
5177
5178 while (parsed < eir_len) {
5179 u8 field_len = eir[0];
5180
5181 if (field_len == 0)
5182 return parsed;
5183
5184 parsed += field_len + 1;
5185 eir += field_len + 1;
5186 }
5187
5188 return eir_len;
5189 }
5190
hci_extended_inquiry_result_evt(struct hci_dev * hdev,void * edata,struct sk_buff * skb)5191 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5192 struct sk_buff *skb)
5193 {
5194 struct hci_ev_ext_inquiry_result *ev = edata;
5195 struct inquiry_data data;
5196 size_t eir_len;
5197 int i;
5198
5199 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5200 flex_array_size(ev, info, ev->num)))
5201 return;
5202
5203 bt_dev_dbg(hdev, "num %d", ev->num);
5204
5205 if (!ev->num)
5206 return;
5207
5208 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5209 return;
5210
5211 hci_dev_lock(hdev);
5212
5213 for (i = 0; i < ev->num; i++) {
5214 struct extended_inquiry_info *info = &ev->info[i];
5215 u32 flags;
5216 bool name_known;
5217
5218 bacpy(&data.bdaddr, &info->bdaddr);
5219 data.pscan_rep_mode = info->pscan_rep_mode;
5220 data.pscan_period_mode = info->pscan_period_mode;
5221 data.pscan_mode = 0x00;
5222 memcpy(data.dev_class, info->dev_class, 3);
5223 data.clock_offset = info->clock_offset;
5224 data.rssi = info->rssi;
5225 data.ssp_mode = 0x01;
5226
5227 if (hci_dev_test_flag(hdev, HCI_MGMT))
5228 name_known = eir_get_data(info->data,
5229 sizeof(info->data),
5230 EIR_NAME_COMPLETE, NULL);
5231 else
5232 name_known = true;
5233
5234 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5235
5236 eir_len = eir_get_length(info->data, sizeof(info->data));
5237
5238 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5239 info->dev_class, info->rssi,
5240 flags, info->data, eir_len, NULL, 0, 0);
5241 }
5242
5243 hci_dev_unlock(hdev);
5244 }
5245
hci_key_refresh_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5246 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5247 struct sk_buff *skb)
5248 {
5249 struct hci_ev_key_refresh_complete *ev = data;
5250 struct hci_conn *conn;
5251
5252 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5253 __le16_to_cpu(ev->handle));
5254
5255 hci_dev_lock(hdev);
5256
5257 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5258 if (!conn)
5259 goto unlock;
5260
5261 /* For BR/EDR the necessary steps are taken through the
5262 * auth_complete event.
5263 */
5264 if (conn->type != LE_LINK)
5265 goto unlock;
5266
5267 if (!ev->status)
5268 conn->sec_level = conn->pending_sec_level;
5269
5270 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5271
5272 if (ev->status && conn->state == BT_CONNECTED) {
5273 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5274 hci_conn_drop(conn);
5275 goto unlock;
5276 }
5277
5278 if (conn->state == BT_CONFIG) {
5279 if (!ev->status)
5280 conn->state = BT_CONNECTED;
5281
5282 hci_connect_cfm(conn, ev->status);
5283 hci_conn_drop(conn);
5284 } else {
5285 hci_auth_cfm(conn, ev->status);
5286
5287 hci_conn_hold(conn);
5288 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5289 hci_conn_drop(conn);
5290 }
5291
5292 unlock:
5293 hci_dev_unlock(hdev);
5294 }
5295
hci_get_auth_req(struct hci_conn * conn)5296 static u8 hci_get_auth_req(struct hci_conn *conn)
5297 {
5298 /* If remote requests no-bonding follow that lead */
5299 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5300 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5301 return conn->remote_auth | (conn->auth_type & 0x01);
5302
5303 /* If both remote and local have enough IO capabilities, require
5304 * MITM protection
5305 */
5306 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5307 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5308 return conn->remote_auth | 0x01;
5309
5310 /* No MITM protection possible so ignore remote requirement */
5311 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5312 }
5313
bredr_oob_data_present(struct hci_conn * conn)5314 static u8 bredr_oob_data_present(struct hci_conn *conn)
5315 {
5316 struct hci_dev *hdev = conn->hdev;
5317 struct oob_data *data;
5318
5319 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5320 if (!data)
5321 return 0x00;
5322
5323 if (bredr_sc_enabled(hdev)) {
5324 /* When Secure Connections is enabled, then just
5325 * return the present value stored with the OOB
5326 * data. The stored value contains the right present
5327 * information. However it can only be trusted when
5328 * not in Secure Connection Only mode.
5329 */
5330 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5331 return data->present;
5332
5333 /* When Secure Connections Only mode is enabled, then
5334 * the P-256 values are required. If they are not
5335 * available, then do not declare that OOB data is
5336 * present.
5337 */
5338 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5339 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5340 return 0x00;
5341
5342 return 0x02;
5343 }
5344
5345 /* When Secure Connections is not enabled or actually
5346 * not supported by the hardware, then check that if
5347 * P-192 data values are present.
5348 */
5349 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5350 !crypto_memneq(data->hash192, ZERO_KEY, 16))
5351 return 0x00;
5352
5353 return 0x01;
5354 }
5355
/* Handle the HCI IO Capability Request event.
 *
 * Replies with our IO capability, authentication requirement and OOB
 * data presence when pairing is acceptable, otherwise sends a negative
 * reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	/* Hold the connection for the duration of the pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay (0x04)
		 * to DisplayYesNo as it is not supported by BT spec.
		 */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5428
hci_io_capa_reply_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5429 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5430 struct sk_buff *skb)
5431 {
5432 struct hci_ev_io_capa_reply *ev = data;
5433 struct hci_conn *conn;
5434
5435 bt_dev_dbg(hdev, "");
5436
5437 hci_dev_lock(hdev);
5438
5439 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5440 if (!conn)
5441 goto unlock;
5442
5443 conn->remote_cap = ev->capability;
5444 conn->remote_auth = ev->authentication;
5445
5446 unlock:
5447 hci_dev_unlock(hdev);
5448 }
5449
/* Handle the HCI User Confirmation Request event (SSP numeric
 * comparison). Depending on local/remote MITM requirements and IO
 * capabilities, the request is rejected, auto-accepted (possibly after
 * a configurable delay) or forwarded to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the auto-accept, leaving
		 * user space a window to observe the request.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5534
hci_user_passkey_request_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5535 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5536 struct sk_buff *skb)
5537 {
5538 struct hci_ev_user_passkey_req *ev = data;
5539
5540 bt_dev_dbg(hdev, "");
5541
5542 if (hci_dev_test_flag(hdev, HCI_MGMT))
5543 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5544 }
5545
hci_user_passkey_notify_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5546 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5547 struct sk_buff *skb)
5548 {
5549 struct hci_ev_user_passkey_notify *ev = data;
5550 struct hci_conn *conn;
5551
5552 bt_dev_dbg(hdev, "");
5553
5554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5555 if (!conn)
5556 return;
5557
5558 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5559 conn->passkey_entered = 0;
5560
5561 if (hci_dev_test_flag(hdev, HCI_MGMT))
5562 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5563 conn->dst_type, conn->passkey_notify,
5564 conn->passkey_entered);
5565 }
5566
hci_keypress_notify_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5567 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5568 struct sk_buff *skb)
5569 {
5570 struct hci_ev_keypress_notify *ev = data;
5571 struct hci_conn *conn;
5572
5573 bt_dev_dbg(hdev, "");
5574
5575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5576 if (!conn)
5577 return;
5578
5579 switch (ev->type) {
5580 case HCI_KEYPRESS_STARTED:
5581 conn->passkey_entered = 0;
5582 return;
5583
5584 case HCI_KEYPRESS_ENTERED:
5585 conn->passkey_entered++;
5586 break;
5587
5588 case HCI_KEYPRESS_ERASED:
5589 conn->passkey_entered--;
5590 break;
5591
5592 case HCI_KEYPRESS_CLEARED:
5593 conn->passkey_entered = 0;
5594 break;
5595
5596 case HCI_KEYPRESS_COMPLETED:
5597 return;
5598 }
5599
5600 if (hci_dev_test_flag(hdev, HCI_MGMT))
5601 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5602 conn->dst_type, conn->passkey_notify,
5603 conn->passkey_entered);
5604 }
5605
hci_simple_pair_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5606 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5607 struct sk_buff *skb)
5608 {
5609 struct hci_ev_simple_pair_complete *ev = data;
5610 struct hci_conn *conn;
5611
5612 bt_dev_dbg(hdev, "");
5613
5614 hci_dev_lock(hdev);
5615
5616 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5617 if (!conn || !hci_conn_ssp_enabled(conn))
5618 goto unlock;
5619
5620 /* Reset the authentication requirement to unknown */
5621 conn->remote_auth = 0xff;
5622
5623 /* To avoid duplicate auth_failed events to user space we check
5624 * the HCI_CONN_AUTH_PEND flag which will be set if we
5625 * initiated the authentication. A traditional auth_complete
5626 * event gets always produced as initiator and is also mapped to
5627 * the mgmt_auth_failed event */
5628 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5629 mgmt_auth_failed(conn, ev->status);
5630
5631 hci_conn_drop(conn);
5632
5633 unlock:
5634 hci_dev_unlock(hdev);
5635 }
5636
hci_remote_host_features_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5637 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5638 struct sk_buff *skb)
5639 {
5640 struct hci_ev_remote_host_features *ev = data;
5641 struct inquiry_entry *ie;
5642 struct hci_conn *conn;
5643
5644 bt_dev_dbg(hdev, "");
5645
5646 hci_dev_lock(hdev);
5647
5648 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5649 if (conn)
5650 memcpy(conn->features[1], ev->features, 8);
5651
5652 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5653 if (ie)
5654 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5655
5656 hci_dev_unlock(hdev);
5657 }
5658
/* Handle the HCI Remote OOB Data Request event.
 *
 * Replies with the stored OOB hash/randomizer values for the remote
 * device, choosing the extended (P-192 + P-256) or legacy (P-192 only)
 * reply based on Secure Connections support, or sends a negative reply
 * when no OOB data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the legacy P-192
		 * values must not be used, so send them zeroed.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5712
5713 #if IS_ENABLED(CONFIG_BT_HS)
hci_chan_selected_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5714 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5715 struct sk_buff *skb)
5716 {
5717 struct hci_ev_channel_selected *ev = data;
5718 struct hci_conn *hcon;
5719
5720 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5721
5722 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5723 if (!hcon)
5724 return;
5725
5726 amp_read_loc_assoc_final_data(hdev, hcon);
5727 }
5728
/* Handle the AMP Physical Link Complete event.
 *
 * On success the AMP connection is promoted to BT_CONNECTED, inherits
 * the peer address of its underlying BR/EDR link and is confirmed to
 * the AMP manager; on failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Without an AMP manager this link is not ours to set up */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The BR/EDR connection that the AMP link piggybacks on */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Keep the link around for the disconnect timeout */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5769
/* Handle the AMP Logical Link Complete event.
 *
 * Creates the hci_chan for the new AMP logical link and, when a BR/EDR
 * L2CAP channel is waiting on it, confirms the logical link so traffic
 * can move over to the AMP controller.
 *
 * NOTE(review): unlike most event handlers in this file, this one runs
 * without taking hci_dev_lock — confirm that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the L2CAP MTU to the AMP controller's block
		 * MTU and confirm the logical link to L2CAP.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5808
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5809 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5810 struct sk_buff *skb)
5811 {
5812 struct hci_ev_disconn_logical_link_complete *ev = data;
5813 struct hci_chan *hchan;
5814
5815 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5816 le16_to_cpu(ev->handle), ev->status);
5817
5818 if (ev->status)
5819 return;
5820
5821 hci_dev_lock(hdev);
5822
5823 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5824 if (!hchan || !hchan->amp)
5825 goto unlock;
5826
5827 amp_destroy_logical_link(hchan, ev->reason);
5828
5829 unlock:
5830 hci_dev_unlock(hdev);
5831 }
5832
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)5833 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5834 struct sk_buff *skb)
5835 {
5836 struct hci_ev_disconn_phy_link_complete *ev = data;
5837 struct hci_conn *hcon;
5838
5839 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5840
5841 if (ev->status)
5842 return;
5843
5844 hci_dev_lock(hdev);
5845
5846 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5847 if (hcon && hcon->type == AMP_LINK) {
5848 hcon->state = BT_CLOSED;
5849 hci_disconn_cfm(hcon, ev->reason);
5850 hci_conn_del(hcon);
5851 }
5852
5853 hci_dev_unlock(hdev);
5854 }
5855 #endif
5856
/* Record the initiator and responder addresses of a new LE connection.
 *
 * For outgoing connections the peer is the responder; for incoming
 * ones we are. A controller-provided Local RPA always takes precedence
 * over hdev->rpa / the configured advertising address.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5909
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. local_rpa is NULL for the legacy event.
 *
 * Creates or completes the hci_conn, resolves the peer identity via
 * IRK lookup, drops blocked or aborted connections, and kicks off the
 * remote feature read (or confirms the connection directly).
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt completed; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt for this peer has succeeded;
	 * drop the reference the parameters held on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6080
hci_le_conn_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6081 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6082 struct sk_buff *skb)
6083 {
6084 struct hci_ev_le_conn_complete *ev = data;
6085
6086 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6087
6088 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6089 NULL, ev->role, le16_to_cpu(ev->handle),
6090 le16_to_cpu(ev->interval),
6091 le16_to_cpu(ev->latency),
6092 le16_to_cpu(ev->supervision_timeout));
6093 }
6094
hci_le_enh_conn_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6095 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6096 struct sk_buff *skb)
6097 {
6098 struct hci_ev_le_enh_conn_complete *ev = data;
6099
6100 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6101
6102 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6103 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6104 le16_to_cpu(ev->interval),
6105 le16_to_cpu(ev->latency),
6106 le16_to_cpu(ev->supervision_timeout));
6107 }
6108
/* Handle the LE Advertising Set Terminated event.
 *
 * On error the terminated instance is removed (clearing HCI_LE_ADV if
 * it was the last enabled one). On success — termination because a
 * connection was created — the instance is remembered on the new
 * connection and the responder address is fixed up for ext adv.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising
		 * as a whole is still on.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* Only fill in resp_addr when the own address is random
		 * and it has not been set yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the legacy instance using hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6178
hci_le_conn_update_complete_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6179 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6180 struct sk_buff *skb)
6181 {
6182 struct hci_ev_le_conn_update_complete *ev = data;
6183 struct hci_conn *conn;
6184
6185 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6186
6187 if (ev->status)
6188 return;
6189
6190 hci_dev_lock(hdev);
6191
6192 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6193 if (conn) {
6194 conn->le_conn_interval = le16_to_cpu(ev->interval);
6195 conn->le_conn_latency = le16_to_cpu(ev->latency);
6196 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6197 }
6198
6199 hci_dev_unlock(hdev);
6200 }
6201
6202 /* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* An explicit connect request bypasses the auto-connect policy */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6293
/* Common handler for a single advertising report, shared by the legacy,
 * extended and direct advertising report events.
 *
 * Validates the report type and length, resolves RPAs via stored IRKs,
 * optionally triggers a pending LE connection, and forwards the report
 * to the management interface - merging ADV_IND/ADV_SCAN_IND reports
 * with their subsequent SCAN_RSP when possible.
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports.
 * @ext_adv: true when the report came from an extended advertising event
 *	     (merging of pending reports is skipped in that case).
 * @instant: timestamp (jiffies) passed through to mgmt_device_found() for
 *	     Mesh systems.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}
6502
/* Handle the HCI LE Advertising Report event (legacy advertising).
 *
 * The event may batch several reports; each report carries a fixed-size
 * info header, a variable-length data payload and a trailing RSSI byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the advertising data plus one extra byte for the
		 * RSSI that follows it.
		 */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			/* RSSI is stored right after the data bytes */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6541
ext_evt_type_to_legacy(struct hci_dev * hdev,u16 evt_type)6542 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6543 {
6544 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6545 switch (evt_type) {
6546 case LE_LEGACY_ADV_IND:
6547 return LE_ADV_IND;
6548 case LE_LEGACY_ADV_DIRECT_IND:
6549 return LE_ADV_DIRECT_IND;
6550 case LE_LEGACY_ADV_SCAN_IND:
6551 return LE_ADV_SCAN_IND;
6552 case LE_LEGACY_NONCONN_IND:
6553 return LE_ADV_NONCONN_IND;
6554 case LE_LEGACY_SCAN_RSP_ADV:
6555 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6556 return LE_ADV_SCAN_RSP;
6557 }
6558
6559 goto invalid;
6560 }
6561
6562 if (evt_type & LE_EXT_ADV_CONN_IND) {
6563 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6564 return LE_ADV_DIRECT_IND;
6565
6566 return LE_ADV_IND;
6567 }
6568
6569 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6570 return LE_ADV_SCAN_RSP;
6571
6572 if (evt_type & LE_EXT_ADV_SCAN_IND)
6573 return LE_ADV_SCAN_IND;
6574
6575 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6576 evt_type & LE_EXT_ADV_DIRECT_IND)
6577 return LE_ADV_NONCONN_IND;
6578
6579 invalid:
6580 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6581 evt_type);
6582
6583 return LE_ADV_INVALID;
6584 }
6585
/* Handle the HCI LE Extended Advertising Report event.
 *
 * Each report in the batch has its extended event type mapped to a
 * legacy PDU type before being handed to the common report processing.
 * Reports with an unmappable type are skipped.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		/* Only the event-type bits are relevant for the mapping */
		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6624
/* Send HCI LE Periodic Advertising Terminate Sync for the given sync
 * handle. Returns the result of queueing the command.
 */
static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_pa_term_sync cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.handle = handle;

	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cmd), &cmd);
}
6634
hci_le_pa_sync_estabilished_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6635 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6636 struct sk_buff *skb)
6637 {
6638 struct hci_ev_le_pa_sync_established *ev = data;
6639 int mask = hdev->link_mode;
6640 __u8 flags = 0;
6641 struct hci_conn *pa_sync;
6642
6643 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6644
6645 hci_dev_lock(hdev);
6646
6647 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6648
6649 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6650 if (!(mask & HCI_LM_ACCEPT)) {
6651 hci_le_pa_term_sync(hdev, ev->handle);
6652 goto unlock;
6653 }
6654
6655 if (!(flags & HCI_PROTO_DEFER))
6656 goto unlock;
6657
6658 if (ev->status) {
6659 /* Add connection to indicate the failed PA sync event */
6660 pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6661 HCI_ROLE_SLAVE);
6662
6663 if (!pa_sync)
6664 goto unlock;
6665
6666 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6667
6668 /* Notify iso layer */
6669 hci_connect_cfm(pa_sync, ev->status);
6670 }
6671
6672 unlock:
6673 hci_dev_unlock(hdev);
6674 }
6675
hci_le_per_adv_report_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6676 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6677 struct sk_buff *skb)
6678 {
6679 struct hci_ev_le_per_adv_report *ev = data;
6680 int mask = hdev->link_mode;
6681 __u8 flags = 0;
6682
6683 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6684
6685 hci_dev_lock(hdev);
6686
6687 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6688 if (!(mask & HCI_LM_ACCEPT))
6689 hci_le_pa_term_sync(hdev, ev->sync_handle);
6690
6691 hci_dev_unlock(hdev);
6692 }
6693
/* Handle the HCI LE Read Remote Features Complete event.
 *
 * Stores the remote LE features on success and, for connections still
 * in BT_CONFIG, transitions them to BT_CONNECTED - treating error 0x1a
 * (unsupported remote feature) as success for central-initiated
 * connections when the local controller supports peripheral-initiated
 * feature exchange.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
6735
/* Handle the HCI LE Long Term Key Request event.
 *
 * Looks up a stored LTK matching the connection and the EDiv/Rand
 * values from the event; replies with the key on a match, or sends a
 * negative reply when no suitable key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Keys may be shorter than the reply field; zero-pad the rest */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6800
/* Send a negative reply to an LE Remote Connection Parameter Request,
 * rejecting it with the given HCI error @reason.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply reply = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(reply),
		     &reply);
}
6812
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters against the connection limits and
 * either rejects them with a negative reply or accepts them - updating
 * any stored connection parameters and informing the management layer
 * when acting as central.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject intervals above the limit configured for this connection */
	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the parameters as requested; no CE length preference */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6876
hci_le_direct_adv_report_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6877 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6878 struct sk_buff *skb)
6879 {
6880 struct hci_ev_le_direct_adv_report *ev = data;
6881 u64 instant = jiffies;
6882 int i;
6883
6884 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6885 flex_array_size(ev, info, ev->num)))
6886 return;
6887
6888 if (!ev->num)
6889 return;
6890
6891 hci_dev_lock(hdev);
6892
6893 for (i = 0; i < ev->num; i++) {
6894 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6895
6896 process_adv_report(hdev, info->type, &info->bdaddr,
6897 info->bdaddr_type, &info->direct_addr,
6898 info->direct_addr_type, info->rssi, NULL, 0,
6899 false, false, instant);
6900 }
6901
6902 hci_dev_unlock(hdev);
6903 }
6904
hci_le_phy_update_evt(struct hci_dev * hdev,void * data,struct sk_buff * skb)6905 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6906 struct sk_buff *skb)
6907 {
6908 struct hci_ev_le_phy_update_complete *ev = data;
6909 struct hci_conn *conn;
6910
6911 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6912
6913 if (ev->status)
6914 return;
6915
6916 hci_dev_lock(hdev);
6917
6918 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6919 if (!conn)
6920 goto unlock;
6921
6922 conn->le_tx_phy = ev->tx_phy;
6923 conn->le_rx_phy = ev->rx_phy;
6924
6925 unlock:
6926 hci_dev_unlock(hdev);
6927 }
6928
/* Handle the HCI LE CIS Established event.
 *
 * Fills in the connection's unicast ISO QoS from the event parameters -
 * mapping the central/peripheral (c_*/p_*) values to in/out depending on
 * our role - then completes or tears down the connection based on the
 * event status.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* As peripheral: central-to-peripheral (c_*) is our input */
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* As central: central-to-peripheral (c_*) is our output */
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	/* Kick off the next queued CIS creation, if any was pending */
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
7012
/* Reject an incoming CIS request identified by @handle */
static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_reject_cis cp = {
		.handle = handle,
		.reason = HCI_ERROR_REJ_BAD_ADDR,
	};

	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
}
7022
/* Accept an incoming CIS request identified by @handle */
static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_accept_cis cp = {
		.handle = handle,
	};

	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
}
7031
/* Handle the HCI LE CIS Request event (incoming CIS as peripheral).
 *
 * Rejects the request when no protocol accepts ISO connections on the
 * parent ACL; otherwise creates (or reuses) the CIS connection and
 * either accepts immediately or defers the decision to the ISO layer.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (IS_ERR(cis)) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		/* Deferred: let the ISO layer decide whether to accept */
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7082
hci_iso_term_big_sync(struct hci_dev * hdev,void * data)7083 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7084 {
7085 u8 handle = PTR_UINT(data);
7086
7087 return hci_le_terminate_big_sync(hdev, handle,
7088 HCI_ERROR_LOCAL_HOST_TERM);
7089 }
7090
/* Handle the HCI LE Create BIG Complete event.
 *
 * Walks the connection hash for BISes bound to this BIG, assigning them
 * the handles reported by the controller and completing or deleting them
 * depending on the event status. When no bound BIS is left (all were
 * closed before creation completed), the BIG is terminated.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* The sysfs/debugfs/path helpers may sleep, so the
			 * RCU read lock is dropped around them and
			 * re-acquired before continuing the walk.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7148
/* Handle the HCI LE BIG Sync Established event.
 *
 * Creates or updates a BIS connection for every handle in the event,
 * copies the broadcast QoS parameters, and marks the sync state.
 * On failure, all affected BIS connections are notified after they
 * have been added so the ISO layer sees a consistent set.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	struct hci_conn *pa_sync;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	if (!ev->status) {
		pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
		if (pa_sync)
			/* Also mark the BIG sync established event on the
			 * associated PA sync hcon
			 */
			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
	}

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE, handle);
			if (IS_ERR(bis))
				continue;
		}

		/* 0x42 = BIG sync failed while PA sync remains established */
		if (ev->status != 0x42)
			/* Mark PA sync as established */
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);

		bis->iso_qos.bcast.big = ev->handle;
		/* latency is a 24-bit little-endian field; widen to 32 bits */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

		if (!ev->status) {
			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
			hci_iso_setup_path(bis);
		}
	}

	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	if (ev->status)
		for (i = 0; i < ev->num_bis; i++) {
			u16 handle = le16_to_cpu(ev->bis[i]);

			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
			hci_connect_cfm(bis, ev->status);
		}

	hci_dev_unlock(hdev);
}
7221
/* Handle the HCI LE BIGInfo Advertising Report event.
 *
 * Terminates the PA sync when no protocol accepts ISO connections.
 * Otherwise, on a deferred setup and if no connection exists yet for
 * this sync handle, adds a placeholder PA sync connection and notifies
 * the ISO and management layers.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Nothing to do if a connection already tracks this sync handle */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

	/* Notify MGMT layer */
	mgmt_device_connected(hdev, pa_sync, NULL, 0);

unlock:
	hci_dev_unlock(hdev);
}
7269
/* Declare an LE subevent table entry with distinct minimum and maximum
 * parameter lengths (for variable-length events).
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an LE subevent table entry with a fixed parameter length */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an LE subevent table entry whose only parameter is a status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7282
/* Entries in this table shall be placed at the index matching the subevent
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way events without a callback function can be omitted.
 */
7288 static const struct hci_le_ev {
7289 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7290 u16 min_len;
7291 u16 max_len;
7292 } hci_le_ev_table[U8_MAX + 1] = {
7293 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7294 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7295 sizeof(struct hci_ev_le_conn_complete)),
7296 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7297 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7298 sizeof(struct hci_ev_le_advertising_report),
7299 HCI_MAX_EVENT_SIZE),
7300 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7301 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7302 hci_le_conn_update_complete_evt,
7303 sizeof(struct hci_ev_le_conn_update_complete)),
7304 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7305 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7306 hci_le_remote_feat_complete_evt,
7307 sizeof(struct hci_ev_le_remote_feat_complete)),
7308 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7309 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7310 sizeof(struct hci_ev_le_ltk_req)),
7311 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7312 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7313 hci_le_remote_conn_param_req_evt,
7314 sizeof(struct hci_ev_le_remote_conn_param_req)),
7315 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7316 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7317 hci_le_enh_conn_complete_evt,
7318 sizeof(struct hci_ev_le_enh_conn_complete)),
7319 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7320 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7321 sizeof(struct hci_ev_le_direct_adv_report),
7322 HCI_MAX_EVENT_SIZE),
7323 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7324 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7325 sizeof(struct hci_ev_le_phy_update_complete)),
7326 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7327 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7328 sizeof(struct hci_ev_le_ext_adv_report),
7329 HCI_MAX_EVENT_SIZE),
7330 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7331 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7332 hci_le_pa_sync_estabilished_evt,
7333 sizeof(struct hci_ev_le_pa_sync_established)),
7334 /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7335 HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7336 hci_le_per_adv_report_evt,
7337 sizeof(struct hci_ev_le_per_adv_report),
7338 HCI_MAX_EVENT_SIZE),
7339 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7340 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7341 sizeof(struct hci_evt_le_ext_adv_set_term)),
7342 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7343 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7344 sizeof(struct hci_evt_le_cis_established)),
7345 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7346 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7347 sizeof(struct hci_evt_le_cis_req)),
7348 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7349 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7350 hci_le_create_big_complete_evt,
7351 sizeof(struct hci_evt_le_create_big_complete),
7352 HCI_MAX_EVENT_SIZE),
7353 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
7354 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7355 hci_le_big_sync_established_evt,
7356 sizeof(struct hci_evt_le_big_sync_estabilished),
7357 HCI_MAX_EVENT_SIZE),
7358 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7359 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7360 hci_le_big_info_adv_report_evt,
7361 sizeof(struct hci_evt_le_big_info_adv_report),
7362 HCI_MAX_EVENT_SIZE),
7363 };
7364
/* Dispatch an LE Meta event to its subevent handler from
 * hci_le_ev_table, after completing any pending LE command request
 * that was waiting on this subevent.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Unknown/unhandled subevents simply have no callback in the table */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	/* Too-short events are dropped: the handler's fixed part would be
	 * truncated.
	 */
	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	/* Advance past the fixed part; skb then points at variable data */
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7407
hci_get_cmd_complete(struct hci_dev * hdev,u16 opcode,u8 event,struct sk_buff * skb)7408 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7409 u8 event, struct sk_buff *skb)
7410 {
7411 struct hci_ev_cmd_complete *ev;
7412 struct hci_event_hdr *hdr;
7413
7414 if (!skb)
7415 return false;
7416
7417 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7418 if (!hdr)
7419 return false;
7420
7421 if (event) {
7422 if (hdr->evt != event)
7423 return false;
7424 return true;
7425 }
7426
7427 /* Check if request ended in Command Status - no way to retrieve
7428 * any extra parameters in this case.
7429 */
7430 if (hdr->evt == HCI_EV_CMD_STATUS)
7431 return false;
7432
7433 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7434 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7435 hdr->evt);
7436 return false;
7437 }
7438
7439 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7440 if (!ev)
7441 return false;
7442
7443 if (opcode != __le16_to_cpu(ev->opcode)) {
7444 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7445 __le16_to_cpu(ev->opcode));
7446 return false;
7447 }
7448
7449 return true;
7450 }
7451
/* If the controller woke us from suspend, record why and, for
 * connection/advertising events, which peer address did it, so that
 * MGMT can later report the wake reason to userspace.
 */
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		/* NOTE(review): the LE meta payload is parsed straight out of
		 * skb->data with no explicit skb->len check here — presumably
		 * the caller's header validation covers the minimum, but
		 * confirm the per-subevent sizes are guaranteed upstream.
		 */
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			/* The three report layouts share a leading bdaddr,
			 * located just past the num_reports byte.
			 */
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
7520
/* Declare an event handler with distinct minimum and maximum payload
 * lengths (variable-length events).
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an event handler with a fixed payload length. */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Declare an event handler whose payload is just a status byte. */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Declare a request-aware handler (func_req signature), variable length. */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a request-aware handler with a fixed payload length. */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated
 * Initializers; that way events without a callback function don't need to be
 * entered.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;
	u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
7703
/* Look up @event in hci_ev_table, validate the payload length against
 * the handler's bounds, strip the fixed part and invoke the callback.
 * Request-aware handlers (.req == true) additionally receive the
 * request completion plumbing.
 */
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *handler = &hci_ev_table[event];
	void *payload;

	/* No callback registered for this event code */
	if (!handler->func)
		return;

	/* Payloads shorter than the handler's fixed part are dropped */
	if (skb->len < handler->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, handler->min_len);
		return;
	}

	/* Just warn if the length is over max_len size it still be
	 * possible to partially parse the event so leave to callback to
	 * decide if that is acceptable.
	 */
	if (skb->len > handler->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, handler->max_len);

	payload = hci_ev_skb_pull(hdev, skb, event, handler->min_len);
	if (!payload)
		return;

	if (handler->req)
		handler->func_req(hdev, payload, skb, opcode, status,
				  req_complete, req_complete_skb);
	else
		handler->func(hdev, payload, skb);
}
7740
/* Entry point for a received HCI event packet: validates the header,
 * snapshots the event for later retrieval, completes any pending
 * request waiting on this event, dispatches to the per-event handler
 * and finally runs the request completion callback, if any.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Replace the stored copy of the most recently received event */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	/* Event code 0x00 is not a valid HCI event */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	/* Handlers may update opcode/status and the completion callbacks */
	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand over orig_skb only if it really is the matching
		 * Command Complete; otherwise pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so both clones are safe to drop here */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
7808