xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 5af2e235)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI event handling. */
27 
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "smp.h"
40 #include "msft.h"
41 #include "eir.h"
42 
43 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
44 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
45 
46 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
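/* For example, secs_to_jiffies(2) expands to msecs_to_jiffies(2000), i.e.
 * roughly 2 * HZ ticks; it is used below to arm the delayed works for RPA
 * expiry, where hdev->rpa_timeout is given in seconds.
 */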
47 
48 /* Handle HCI Event packets */
49 
50 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
51 			     u8 ev, size_t len)
52 {
53 	void *data;
54 
55 	data = skb_pull_data(skb, len);
56 	if (!data)
57 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
58 
59 	return data;
60 }
61 
62 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
63 			     u16 op, size_t len)
64 {
65 	void *data;
66 
67 	data = skb_pull_data(skb, len);
68 	if (!data)
69 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
70 
71 	return data;
72 }
73 
74 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
75 				u8 ev, size_t len)
76 {
77 	void *data;
78 
79 	data = skb_pull_data(skb, len);
80 	if (!data)
81 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
82 
83 	return data;
84 }
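
/* Editor's sketch (not part of the original file): all three pull helpers
 * above wrap skb_pull_data(), which returns NULL when fewer than 'len'
 * bytes remain, so a handler can validate its fixed-size reply before
 * dereferencing it, along these lines:
 *
 *	struct hci_rp_role_discovery *rp;
 *
 *	rp = hci_cc_skb_pull(hdev, skb, HCI_OP_ROLE_DISCOVERY, sizeof(*rp));
 *	if (!rp)
 *		return HCI_ERROR_UNSPECIFIED;
 */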
85 
86 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87 				struct sk_buff *skb)
88 {
89 	struct hci_ev_status *rp = data;
90 
91 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
92 
93 	/* It is possible that we receive the Inquiry Complete event right
94 	 * before the Inquiry Cancel Command Complete event, in which case
95 	 * the latter event should have a status of Command Disallowed
96 	 * (0x0c). This should not be treated as an error, since we actually
97 	 * achieve what Inquiry Cancel wants to achieve, which is to end the
98 	 * last Inquiry session.
99 	 */
100 	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
101 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
102 		rp->status = 0x00;
103 	}
104 
105 	if (rp->status)
106 		return rp->status;
107 
108 	clear_bit(HCI_INQUIRY, &hdev->flags);
109 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
110 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
111 
112 	hci_dev_lock(hdev);
113 	/* Set discovery state to stopped if we're not doing LE active
114 	 * scanning.
115 	 */
116 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
117 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
118 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
119 	hci_dev_unlock(hdev);
120 
121 	hci_conn_check_pending(hdev);
122 
123 	return rp->status;
124 }
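
/* Editor's note on the barrier above: smp_mb__after_atomic() orders the
 * clear_bit() before the wake_up_bit() so that a sleeper using the
 * canonical wait-on-bit pattern cannot miss the wakeup, e.g. (sketch):
 *
 *	wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE);
 *
 * See the wake_up_bit() kernel-doc for the exact pairing requirements.
 */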
125 
126 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 			      struct sk_buff *skb)
128 {
129 	struct hci_ev_status *rp = data;
130 
131 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
132 
133 	if (rp->status)
134 		return rp->status;
135 
136 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
137 
138 	return rp->status;
139 }
140 
141 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 				   struct sk_buff *skb)
143 {
144 	struct hci_ev_status *rp = data;
145 
146 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
147 
148 	if (rp->status)
149 		return rp->status;
150 
151 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
152 
153 	hci_conn_check_pending(hdev);
154 
155 	return rp->status;
156 }
157 
158 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 					struct sk_buff *skb)
160 {
161 	struct hci_ev_status *rp = data;
162 
163 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
164 
165 	return rp->status;
166 }
167 
168 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 				struct sk_buff *skb)
170 {
171 	struct hci_rp_role_discovery *rp = data;
172 	struct hci_conn *conn;
173 
174 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
175 
176 	if (rp->status)
177 		return rp->status;
178 
179 	hci_dev_lock(hdev);
180 
181 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
182 	if (conn)
183 		conn->role = rp->role;
184 
185 	hci_dev_unlock(hdev);
186 
187 	return rp->status;
188 }
189 
190 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 				  struct sk_buff *skb)
192 {
193 	struct hci_rp_read_link_policy *rp = data;
194 	struct hci_conn *conn;
195 
196 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
197 
198 	if (rp->status)
199 		return rp->status;
200 
201 	hci_dev_lock(hdev);
202 
203 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
204 	if (conn)
205 		conn->link_policy = __le16_to_cpu(rp->policy);
206 
207 	hci_dev_unlock(hdev);
208 
209 	return rp->status;
210 }
211 
212 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
213 				   struct sk_buff *skb)
214 {
215 	struct hci_rp_write_link_policy *rp = data;
216 	struct hci_conn *conn;
217 	void *sent;
218 
219 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
220 
221 	if (rp->status)
222 		return rp->status;
223 
224 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
225 	if (!sent)
226 		return rp->status;
227 
228 	hci_dev_lock(hdev);
229 
230 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
231 	if (conn)
232 		conn->link_policy = get_unaligned_le16(sent + 2);
233 
234 	hci_dev_unlock(hdev);
235 
236 	return rp->status;
237 }
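
/* Editor's note on the "sent + 2" above: the Write Link Policy command
 * parameters start with a __le16 connection handle, so the requested
 * policy sits two bytes into the sent command (layout sketch):
 *
 *	struct hci_cp_write_link_policy {
 *		__le16 handle;
 *		__le16 policy;
 *	} __packed;
 */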
238 
239 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
240 				      struct sk_buff *skb)
241 {
242 	struct hci_rp_read_def_link_policy *rp = data;
243 
244 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
245 
246 	if (rp->status)
247 		return rp->status;
248 
249 	hdev->link_policy = __le16_to_cpu(rp->policy);
250 
251 	return rp->status;
252 }
253 
254 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
255 				       struct sk_buff *skb)
256 {
257 	struct hci_ev_status *rp = data;
258 	void *sent;
259 
260 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
261 
262 	if (rp->status)
263 		return rp->status;
264 
265 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
266 	if (!sent)
267 		return rp->status;
268 
269 	hdev->link_policy = get_unaligned_le16(sent);
270 
271 	return rp->status;
272 }
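
/* Editor's note: hci_sent_cmd_data() returns a pointer to the parameters
 * of the most recently sent command for the given opcode, or NULL if no
 * such command is in flight. "Write" Command Complete events carry only a
 * status byte, which is why handlers like the one above re-read the
 * requested value from the sent command instead of from the event.
 */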
273 
274 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
275 {
276 	struct hci_ev_status *rp = data;
277 
278 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
279 
280 	clear_bit(HCI_RESET, &hdev->flags);
281 
282 	if (rp->status)
283 		return rp->status;
284 
285 	/* Reset all non-persistent flags */
286 	hci_dev_clear_volatile_flags(hdev);
287 
288 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
289 
290 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
291 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
292 
293 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
294 	hdev->adv_data_len = 0;
295 
296 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
297 	hdev->scan_rsp_data_len = 0;
298 
299 	hdev->le_scan_type = LE_SCAN_PASSIVE;
300 
301 	hdev->ssp_debug_mode = 0;
302 
303 	hci_bdaddr_list_clear(&hdev->le_accept_list);
304 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
305 
306 	return rp->status;
307 }
308 
309 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
310 				      struct sk_buff *skb)
311 {
312 	struct hci_rp_read_stored_link_key *rp = data;
313 	struct hci_cp_read_stored_link_key *sent;
314 
315 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
316 
317 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
318 	if (!sent)
319 		return rp->status;
320 
321 	if (!rp->status && sent->read_all == 0x01) {
322 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
323 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
324 	}
325 
326 	return rp->status;
327 }
328 
329 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
330 					struct sk_buff *skb)
331 {
332 	struct hci_rp_delete_stored_link_key *rp = data;
333 	u16 num_keys;
334 
335 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
336 
337 	if (rp->status)
338 		return rp->status;
339 
340 	num_keys = le16_to_cpu(rp->num_keys);
341 
342 	if (num_keys <= hdev->stored_num_keys)
343 		hdev->stored_num_keys -= num_keys;
344 	else
345 		hdev->stored_num_keys = 0;
346 
347 	return rp->status;
348 }
349 
350 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
351 				  struct sk_buff *skb)
352 {
353 	struct hci_ev_status *rp = data;
354 	void *sent;
355 
356 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
357 
358 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
359 	if (!sent)
360 		return rp->status;
361 
362 	hci_dev_lock(hdev);
363 
364 	if (hci_dev_test_flag(hdev, HCI_MGMT))
365 		mgmt_set_local_name_complete(hdev, sent, rp->status);
366 	else if (!rp->status)
367 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
368 
369 	hci_dev_unlock(hdev);
370 
371 	return rp->status;
372 }
373 
374 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
375 				 struct sk_buff *skb)
376 {
377 	struct hci_rp_read_local_name *rp = data;
378 
379 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
380 
381 	if (rp->status)
382 		return rp->status;
383 
384 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
385 	    hci_dev_test_flag(hdev, HCI_CONFIG))
386 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
387 
388 	return rp->status;
389 }
390 
391 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
392 				   struct sk_buff *skb)
393 {
394 	struct hci_ev_status *rp = data;
395 	void *sent;
396 
397 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
398 
399 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
400 	if (!sent)
401 		return rp->status;
402 
403 	hci_dev_lock(hdev);
404 
405 	if (!rp->status) {
406 		__u8 param = *((__u8 *) sent);
407 
408 		if (param == AUTH_ENABLED)
409 			set_bit(HCI_AUTH, &hdev->flags);
410 		else
411 			clear_bit(HCI_AUTH, &hdev->flags);
412 	}
413 
414 	if (hci_dev_test_flag(hdev, HCI_MGMT))
415 		mgmt_auth_enable_complete(hdev, rp->status);
416 
417 	hci_dev_unlock(hdev);
418 
419 	return rp->status;
420 }
421 
422 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
423 				    struct sk_buff *skb)
424 {
425 	struct hci_ev_status *rp = data;
426 	__u8 param;
427 	void *sent;
428 
429 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
430 
431 	if (rp->status)
432 		return rp->status;
433 
434 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
435 	if (!sent)
436 		return rp->status;
437 
438 	param = *((__u8 *) sent);
439 
440 	if (param)
441 		set_bit(HCI_ENCRYPT, &hdev->flags);
442 	else
443 		clear_bit(HCI_ENCRYPT, &hdev->flags);
444 
445 	return rp->status;
446 }
447 
448 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
449 				   struct sk_buff *skb)
450 {
451 	struct hci_ev_status *rp = data;
452 	__u8 param;
453 	void *sent;
454 
455 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
456 
457 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
458 	if (!sent)
459 		return rp->status;
460 
461 	param = *((__u8 *) sent);
462 
463 	hci_dev_lock(hdev);
464 
465 	if (rp->status) {
466 		hdev->discov_timeout = 0;
467 		goto done;
468 	}
469 
470 	if (param & SCAN_INQUIRY)
471 		set_bit(HCI_ISCAN, &hdev->flags);
472 	else
473 		clear_bit(HCI_ISCAN, &hdev->flags);
474 
475 	if (param & SCAN_PAGE)
476 		set_bit(HCI_PSCAN, &hdev->flags);
477 	else
478 		clear_bit(HCI_PSCAN, &hdev->flags);
479 
480 done:
481 	hci_dev_unlock(hdev);
482 
483 	return rp->status;
484 }
485 
486 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
487 				  struct sk_buff *skb)
488 {
489 	struct hci_ev_status *rp = data;
490 	struct hci_cp_set_event_filter *cp;
491 	void *sent;
492 
493 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
494 
495 	if (rp->status)
496 		return rp->status;
497 
498 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
499 	if (!sent)
500 		return rp->status;
501 
502 	cp = (struct hci_cp_set_event_filter *)sent;
503 
504 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
505 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506 	else
507 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508 
509 	return rp->status;
510 }
511 
512 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
513 				   struct sk_buff *skb)
514 {
515 	struct hci_rp_read_class_of_dev *rp = data;
516 
517 	if (WARN_ON(!hdev))
518 		return HCI_ERROR_UNSPECIFIED;
519 
520 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
521 
522 	if (rp->status)
523 		return rp->status;
524 
525 	memcpy(hdev->dev_class, rp->dev_class, 3);
526 
527 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
528 		   hdev->dev_class[1], hdev->dev_class[0]);
529 
530 	return rp->status;
531 }
532 
533 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
534 				    struct sk_buff *skb)
535 {
536 	struct hci_ev_status *rp = data;
537 	void *sent;
538 
539 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
540 
541 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
542 	if (!sent)
543 		return rp->status;
544 
545 	hci_dev_lock(hdev);
546 
547 	if (!rp->status)
548 		memcpy(hdev->dev_class, sent, 3);
549 
550 	if (hci_dev_test_flag(hdev, HCI_MGMT))
551 		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
552 
553 	hci_dev_unlock(hdev);
554 
555 	return rp->status;
556 }
557 
558 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
559 				    struct sk_buff *skb)
560 {
561 	struct hci_rp_read_voice_setting *rp = data;
562 	__u16 setting;
563 
564 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
565 
566 	if (rp->status)
567 		return rp->status;
568 
569 	setting = __le16_to_cpu(rp->voice_setting);
570 
571 	if (hdev->voice_setting == setting)
572 		return rp->status;
573 
574 	hdev->voice_setting = setting;
575 
576 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
577 
578 	if (hdev->notify)
579 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
580 
581 	return rp->status;
582 }
583 
584 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
585 				     struct sk_buff *skb)
586 {
587 	struct hci_ev_status *rp = data;
588 	__u16 setting;
589 	void *sent;
590 
591 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
592 
593 	if (rp->status)
594 		return rp->status;
595 
596 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
597 	if (!sent)
598 		return rp->status;
599 
600 	setting = get_unaligned_le16(sent);
601 
602 	if (hdev->voice_setting == setting)
603 		return rp->status;
604 
605 	hdev->voice_setting = setting;
606 
607 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
608 
609 	if (hdev->notify)
610 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
611 
612 	return rp->status;
613 }
614 
615 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
616 					struct sk_buff *skb)
617 {
618 	struct hci_rp_read_num_supported_iac *rp = data;
619 
620 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
621 
622 	if (rp->status)
623 		return rp->status;
624 
625 	hdev->num_iac = rp->num_iac;
626 
627 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
628 
629 	return rp->status;
630 }
631 
632 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
633 				struct sk_buff *skb)
634 {
635 	struct hci_ev_status *rp = data;
636 	struct hci_cp_write_ssp_mode *sent;
637 
638 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
639 
640 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
641 	if (!sent)
642 		return rp->status;
643 
644 	hci_dev_lock(hdev);
645 
646 	if (!rp->status) {
647 		if (sent->mode)
648 			hdev->features[1][0] |= LMP_HOST_SSP;
649 		else
650 			hdev->features[1][0] &= ~LMP_HOST_SSP;
651 	}
652 
653 	if (!rp->status) {
654 		if (sent->mode)
655 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
656 		else
657 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
658 	}
659 
660 	hci_dev_unlock(hdev);
661 
662 	return rp->status;
663 }
664 
665 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
666 				  struct sk_buff *skb)
667 {
668 	struct hci_ev_status *rp = data;
669 	struct hci_cp_write_sc_support *sent;
670 
671 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
672 
673 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
674 	if (!sent)
675 		return rp->status;
676 
677 	hci_dev_lock(hdev);
678 
679 	if (!rp->status) {
680 		if (sent->support)
681 			hdev->features[1][0] |= LMP_HOST_SC;
682 		else
683 			hdev->features[1][0] &= ~LMP_HOST_SC;
684 	}
685 
686 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
687 		if (sent->support)
688 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
689 		else
690 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
691 	}
692 
693 	hci_dev_unlock(hdev);
694 
695 	return rp->status;
696 }
697 
698 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
699 				    struct sk_buff *skb)
700 {
701 	struct hci_rp_read_local_version *rp = data;
702 
703 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
704 
705 	if (rp->status)
706 		return rp->status;
707 
708 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
709 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
710 		hdev->hci_ver = rp->hci_ver;
711 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
712 		hdev->lmp_ver = rp->lmp_ver;
713 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
714 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
715 	}
716 
717 	return rp->status;
718 }
719 
720 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
721 				   struct sk_buff *skb)
722 {
723 	struct hci_rp_read_enc_key_size *rp = data;
724 	struct hci_conn *conn;
725 	u16 handle;
726 	u8 status = rp->status;
727 
728 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
729 
730 	handle = le16_to_cpu(rp->handle);
731 
732 	hci_dev_lock(hdev);
733 
734 	conn = hci_conn_hash_lookup_handle(hdev, handle);
735 	if (!conn) {
736 		status = 0xFF;
737 		goto done;
738 	}
739 
740 	/* While unexpected, the read_enc_key_size command may fail. The most
741 	 * secure approach is to then assume the key size is 0 to force a
742 	 * disconnection.
743 	 */
744 	if (status) {
745 		bt_dev_err(hdev, "failed to read key size for handle %u",
746 			   handle);
747 		conn->enc_key_size = 0;
748 	} else {
749 		conn->enc_key_size = rp->key_size;
750 		status = 0;
751 
752 		if (conn->enc_key_size < hdev->min_enc_key_size) {
753 			/* In the slave role, conn->state has already been set
754 			 * to BT_CONNECTED while the l2cap conn req might not
755 			 * have been received yet; at this moment the l2cap
756 			 * layer does almost nothing with the non-zero status.
757 			 * So we also clear the encrypt-related bits, and the
758 			 * handler of the l2cap conn req will then see the
759 			 * right security state at a later time.
760 			 */
761 			status = HCI_ERROR_AUTH_FAILURE;
762 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
763 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
764 		}
765 	}
766 
767 	hci_encrypt_cfm(conn, status);
768 
769 done:
770 	hci_dev_unlock(hdev);
771 
772 	return status;
773 }
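
/* Worked example (editor's sketch): with the default minimum of 7 octets
 * in hdev->min_enc_key_size, a controller reporting key_size = 5 above
 * yields status = HCI_ERROR_AUTH_FAILURE with the ENCRYPT/AES_CCM flags
 * cleared, so hci_encrypt_cfm() propagates an authentication failure
 * rather than accepting a downgraded (low-entropy) encryption key.
 */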
774 
775 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
776 				     struct sk_buff *skb)
777 {
778 	struct hci_rp_read_local_commands *rp = data;
779 
780 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
781 
782 	if (rp->status)
783 		return rp->status;
784 
785 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
786 	    hci_dev_test_flag(hdev, HCI_CONFIG))
787 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
788 
789 	return rp->status;
790 }
791 
792 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
793 					   struct sk_buff *skb)
794 {
795 	struct hci_rp_read_auth_payload_to *rp = data;
796 	struct hci_conn *conn;
797 
798 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
799 
800 	if (rp->status)
801 		return rp->status;
802 
803 	hci_dev_lock(hdev);
804 
805 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
806 	if (conn)
807 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
808 
809 	hci_dev_unlock(hdev);
810 
811 	return rp->status;
812 }
813 
814 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
815 					    struct sk_buff *skb)
816 {
817 	struct hci_rp_write_auth_payload_to *rp = data;
818 	struct hci_conn *conn;
819 	void *sent;
820 
821 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
822 
823 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
824 	if (!sent)
825 		return rp->status;
826 
827 	hci_dev_lock(hdev);
828 
829 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
830 	if (!conn) {
831 		rp->status = 0xff;
832 		goto unlock;
833 	}
834 
835 	if (!rp->status)
836 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
837 
838 unlock:
839 	hci_dev_unlock(hdev);
840 
841 	return rp->status;
842 }
843 
844 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
845 				     struct sk_buff *skb)
846 {
847 	struct hci_rp_read_local_features *rp = data;
848 
849 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
850 
851 	if (rp->status)
852 		return rp->status;
853 
854 	memcpy(hdev->features, rp->features, 8);
855 
856 	/* Adjust default settings according to features
857 	 * supported by the device. */
858 
859 	if (hdev->features[0][0] & LMP_3SLOT)
860 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
861 
862 	if (hdev->features[0][0] & LMP_5SLOT)
863 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
864 
865 	if (hdev->features[0][1] & LMP_HV2) {
866 		hdev->pkt_type  |= (HCI_HV2);
867 		hdev->esco_type |= (ESCO_HV2);
868 	}
869 
870 	if (hdev->features[0][1] & LMP_HV3) {
871 		hdev->pkt_type  |= (HCI_HV3);
872 		hdev->esco_type |= (ESCO_HV3);
873 	}
874 
875 	if (lmp_esco_capable(hdev))
876 		hdev->esco_type |= (ESCO_EV3);
877 
878 	if (hdev->features[0][4] & LMP_EV4)
879 		hdev->esco_type |= (ESCO_EV4);
880 
881 	if (hdev->features[0][4] & LMP_EV5)
882 		hdev->esco_type |= (ESCO_EV5);
883 
884 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
885 		hdev->esco_type |= (ESCO_2EV3);
886 
887 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
888 		hdev->esco_type |= (ESCO_3EV3);
889 
890 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
891 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
892 
893 	return rp->status;
894 }
895 
896 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
897 					 struct sk_buff *skb)
898 {
899 	struct hci_rp_read_local_ext_features *rp = data;
900 
901 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
902 
903 	if (rp->status)
904 		return rp->status;
905 
906 	if (hdev->max_page < rp->max_page) {
907 		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
908 			     &hdev->quirks))
909 			bt_dev_warn(hdev, "broken local ext features page 2");
910 		else
911 			hdev->max_page = rp->max_page;
912 	}
913 
914 	if (rp->page < HCI_MAX_PAGES)
915 		memcpy(hdev->features[rp->page], rp->features, 8);
916 
917 	return rp->status;
918 }
919 
920 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
921 				  struct sk_buff *skb)
922 {
923 	struct hci_rp_read_buffer_size *rp = data;
924 
925 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
926 
927 	if (rp->status)
928 		return rp->status;
929 
930 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
931 	hdev->sco_mtu  = rp->sco_mtu;
932 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
933 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
934 
935 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
936 		hdev->sco_mtu  = 64;
937 		hdev->sco_pkts = 8;
938 	}
939 
940 	hdev->acl_cnt = hdev->acl_pkts;
941 	hdev->sco_cnt = hdev->sco_pkts;
942 
943 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
944 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
945 
946 	if (!hdev->acl_mtu || !hdev->acl_pkts)
947 		return HCI_ERROR_INVALID_PARAMETERS;
948 
949 	return rp->status;
950 }
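
/* Example (editor's note): a controller answering acl_mtu = 1021 and
 * acl_max_pkt = 8 allows at most eight outstanding ACL packets of up to
 * 1021 bytes each; hdev->acl_cnt is the remaining credit, consumed on
 * transmit and replenished by Number Of Completed Packets events.
 */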
951 
952 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
953 			      struct sk_buff *skb)
954 {
955 	struct hci_rp_read_bd_addr *rp = data;
956 
957 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
958 
959 	if (rp->status)
960 		return rp->status;
961 
962 	if (test_bit(HCI_INIT, &hdev->flags))
963 		bacpy(&hdev->bdaddr, &rp->bdaddr);
964 
965 	if (hci_dev_test_flag(hdev, HCI_SETUP))
966 		bacpy(&hdev->setup_addr, &rp->bdaddr);
967 
968 	return rp->status;
969 }
970 
971 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
972 					 struct sk_buff *skb)
973 {
974 	struct hci_rp_read_local_pairing_opts *rp = data;
975 
976 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
977 
978 	if (rp->status)
979 		return rp->status;
980 
981 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
982 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
983 		hdev->pairing_opts = rp->pairing_opts;
984 		hdev->max_enc_key_size = rp->max_key_size;
985 	}
986 
987 	return rp->status;
988 }
989 
990 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
991 					 struct sk_buff *skb)
992 {
993 	struct hci_rp_read_page_scan_activity *rp = data;
994 
995 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
996 
997 	if (rp->status)
998 		return rp->status;
999 
1000 	if (test_bit(HCI_INIT, &hdev->flags)) {
1001 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1002 		hdev->page_scan_window = __le16_to_cpu(rp->window);
1003 	}
1004 
1005 	return rp->status;
1006 }
1007 
1008 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1009 					  struct sk_buff *skb)
1010 {
1011 	struct hci_ev_status *rp = data;
1012 	struct hci_cp_write_page_scan_activity *sent;
1013 
1014 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1015 
1016 	if (rp->status)
1017 		return rp->status;
1018 
1019 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1020 	if (!sent)
1021 		return rp->status;
1022 
1023 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1024 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1025 
1026 	return rp->status;
1027 }
1028 
1029 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1030 				     struct sk_buff *skb)
1031 {
1032 	struct hci_rp_read_page_scan_type *rp = data;
1033 
1034 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1035 
1036 	if (rp->status)
1037 		return rp->status;
1038 
1039 	if (test_bit(HCI_INIT, &hdev->flags))
1040 		hdev->page_scan_type = rp->type;
1041 
1042 	return rp->status;
1043 }
1044 
1045 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1046 				      struct sk_buff *skb)
1047 {
1048 	struct hci_ev_status *rp = data;
1049 	u8 *type;
1050 
1051 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1052 
1053 	if (rp->status)
1054 		return rp->status;
1055 
1056 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1057 	if (type)
1058 		hdev->page_scan_type = *type;
1059 
1060 	return rp->status;
1061 }
1062 
1063 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1064 			    struct sk_buff *skb)
1065 {
1066 	struct hci_rp_read_clock *rp = data;
1067 	struct hci_cp_read_clock *cp;
1068 	struct hci_conn *conn;
1069 
1070 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1071 
1072 	if (rp->status)
1073 		return rp->status;
1074 
1075 	hci_dev_lock(hdev);
1076 
1077 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1078 	if (!cp)
1079 		goto unlock;
1080 
1081 	if (cp->which == 0x00) {
1082 		hdev->clock = le32_to_cpu(rp->clock);
1083 		goto unlock;
1084 	}
1085 
1086 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1087 	if (conn) {
1088 		conn->clock = le32_to_cpu(rp->clock);
1089 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1090 	}
1091 
1092 unlock:
1093 	hci_dev_unlock(hdev);
1094 	return rp->status;
1095 }
1096 
1097 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1098 				       struct sk_buff *skb)
1099 {
1100 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1101 
1102 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1103 
1104 	if (rp->status)
1105 		return rp->status;
1106 
1107 	hdev->inq_tx_power = rp->tx_power;
1108 
1109 	return rp->status;
1110 }
1111 
1112 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1113 					     struct sk_buff *skb)
1114 {
1115 	struct hci_rp_read_def_err_data_reporting *rp = data;
1116 
1117 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1118 
1119 	if (rp->status)
1120 		return rp->status;
1121 
1122 	hdev->err_data_reporting = rp->err_data_reporting;
1123 
1124 	return rp->status;
1125 }
1126 
1127 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1128 					      struct sk_buff *skb)
1129 {
1130 	struct hci_ev_status *rp = data;
1131 	struct hci_cp_write_def_err_data_reporting *cp;
1132 
1133 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1134 
1135 	if (rp->status)
1136 		return rp->status;
1137 
1138 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1139 	if (!cp)
1140 		return rp->status;
1141 
1142 	hdev->err_data_reporting = cp->err_data_reporting;
1143 
1144 	return rp->status;
1145 }
1146 
1147 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1148 				struct sk_buff *skb)
1149 {
1150 	struct hci_rp_pin_code_reply *rp = data;
1151 	struct hci_cp_pin_code_reply *cp;
1152 	struct hci_conn *conn;
1153 
1154 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1155 
1156 	hci_dev_lock(hdev);
1157 
1158 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1159 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1160 
1161 	if (rp->status)
1162 		goto unlock;
1163 
1164 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1165 	if (!cp)
1166 		goto unlock;
1167 
1168 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1169 	if (conn)
1170 		conn->pin_length = cp->pin_len;
1171 
1172 unlock:
1173 	hci_dev_unlock(hdev);
1174 	return rp->status;
1175 }
1176 
1177 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1178 				    struct sk_buff *skb)
1179 {
1180 	struct hci_rp_pin_code_neg_reply *rp = data;
1181 
1182 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1183 
1184 	hci_dev_lock(hdev);
1185 
1186 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1187 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1188 						 rp->status);
1189 
1190 	hci_dev_unlock(hdev);
1191 
1192 	return rp->status;
1193 }
1194 
1195 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1196 				     struct sk_buff *skb)
1197 {
1198 	struct hci_rp_le_read_buffer_size *rp = data;
1199 
1200 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1201 
1202 	if (rp->status)
1203 		return rp->status;
1204 
1205 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1206 	hdev->le_pkts = rp->le_max_pkt;
1207 
1208 	hdev->le_cnt = hdev->le_pkts;
1209 
1210 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1211 
1212 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
1213 		return HCI_ERROR_INVALID_PARAMETERS;
1214 
1215 	return rp->status;
1216 }
1217 
1218 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1219 					struct sk_buff *skb)
1220 {
1221 	struct hci_rp_le_read_local_features *rp = data;
1222 
1223 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1224 
1225 	if (rp->status)
1226 		return rp->status;
1227 
1228 	memcpy(hdev->le_features, rp->features, 8);
1229 
1230 	return rp->status;
1231 }
1232 
1233 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1234 				      struct sk_buff *skb)
1235 {
1236 	struct hci_rp_le_read_adv_tx_power *rp = data;
1237 
1238 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1239 
1240 	if (rp->status)
1241 		return rp->status;
1242 
1243 	hdev->adv_tx_power = rp->tx_power;
1244 
1245 	return rp->status;
1246 }
1247 
1248 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1249 				    struct sk_buff *skb)
1250 {
1251 	struct hci_rp_user_confirm_reply *rp = data;
1252 
1253 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1254 
1255 	hci_dev_lock(hdev);
1256 
1257 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1258 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1259 						 rp->status);
1260 
1261 	hci_dev_unlock(hdev);
1262 
1263 	return rp->status;
1264 }
1265 
1266 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1267 					struct sk_buff *skb)
1268 {
1269 	struct hci_rp_user_confirm_reply *rp = data;
1270 
1271 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1272 
1273 	hci_dev_lock(hdev);
1274 
1275 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1276 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1277 						     ACL_LINK, 0, rp->status);
1278 
1279 	hci_dev_unlock(hdev);
1280 
1281 	return rp->status;
1282 }
1283 
1284 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1285 				    struct sk_buff *skb)
1286 {
1287 	struct hci_rp_user_confirm_reply *rp = data;
1288 
1289 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1290 
1291 	hci_dev_lock(hdev);
1292 
1293 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1294 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1295 						 0, rp->status);
1296 
1297 	hci_dev_unlock(hdev);
1298 
1299 	return rp->status;
1300 }
1301 
1302 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1303 					struct sk_buff *skb)
1304 {
1305 	struct hci_rp_user_confirm_reply *rp = data;
1306 
1307 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1308 
1309 	hci_dev_lock(hdev);
1310 
1311 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1312 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1313 						     ACL_LINK, 0, rp->status);
1314 
1315 	hci_dev_unlock(hdev);
1316 
1317 	return rp->status;
1318 }
1319 
1320 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1321 				     struct sk_buff *skb)
1322 {
1323 	struct hci_rp_read_local_oob_data *rp = data;
1324 
1325 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1326 
1327 	return rp->status;
1328 }
1329 
1330 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1331 					 struct sk_buff *skb)
1332 {
1333 	struct hci_rp_read_local_oob_ext_data *rp = data;
1334 
1335 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1336 
1337 	return rp->status;
1338 }
1339 
1340 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1341 				    struct sk_buff *skb)
1342 {
1343 	struct hci_ev_status *rp = data;
1344 	bdaddr_t *sent;
1345 
1346 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1347 
1348 	if (rp->status)
1349 		return rp->status;
1350 
1351 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1352 	if (!sent)
1353 		return rp->status;
1354 
1355 	hci_dev_lock(hdev);
1356 
1357 	bacpy(&hdev->random_addr, sent);
1358 
1359 	if (!bacmp(&hdev->rpa, sent)) {
1360 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1361 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1362 				   secs_to_jiffies(hdev->rpa_timeout));
1363 	}
1364 
1365 	hci_dev_unlock(hdev);
1366 
1367 	return rp->status;
1368 }
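
/* Timing sketch (editor's note): when the address just programmed equals
 * the locally generated RPA, the expiry work is re-armed to fire after
 * hdev->rpa_timeout seconds (900 by default), at which point
 * HCI_RPA_EXPIRED is set again and a fresh RPA is generated before the
 * next advertising or scanning operation.
 */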
1369 
1370 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1371 				    struct sk_buff *skb)
1372 {
1373 	struct hci_ev_status *rp = data;
1374 	struct hci_cp_le_set_default_phy *cp;
1375 
1376 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1377 
1378 	if (rp->status)
1379 		return rp->status;
1380 
1381 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1382 	if (!cp)
1383 		return rp->status;
1384 
1385 	hci_dev_lock(hdev);
1386 
1387 	hdev->le_tx_def_phys = cp->tx_phys;
1388 	hdev->le_rx_def_phys = cp->rx_phys;
1389 
1390 	hci_dev_unlock(hdev);
1391 
1392 	return rp->status;
1393 }
1394 
1395 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1396 					    struct sk_buff *skb)
1397 {
1398 	struct hci_ev_status *rp = data;
1399 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1400 	struct adv_info *adv;
1401 
1402 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1403 
1404 	if (rp->status)
1405 		return rp->status;
1406 
1407 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1408 	/* Update only for an adv instance, since handle 0x00 shall be using
1409 	 * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1410 	 * non-extended advertising.
1411 	 */
1412 	if (!cp || !cp->handle)
1413 		return rp->status;
1414 
1415 	hci_dev_lock(hdev);
1416 
1417 	adv = hci_find_adv_instance(hdev, cp->handle);
1418 	if (adv) {
1419 		bacpy(&adv->random_addr, &cp->bdaddr);
1420 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1421 			adv->rpa_expired = false;
1422 			queue_delayed_work(hdev->workqueue,
1423 					   &adv->rpa_expired_cb,
1424 					   secs_to_jiffies(hdev->rpa_timeout));
1425 		}
1426 	}
1427 
1428 	hci_dev_unlock(hdev);
1429 
1430 	return rp->status;
1431 }
1432 
1433 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1434 				   struct sk_buff *skb)
1435 {
1436 	struct hci_ev_status *rp = data;
1437 	u8 *instance;
1438 	int err;
1439 
1440 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1441 
1442 	if (rp->status)
1443 		return rp->status;
1444 
1445 	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1446 	if (!instance)
1447 		return rp->status;
1448 
1449 	hci_dev_lock(hdev);
1450 
1451 	err = hci_remove_adv_instance(hdev, *instance);
1452 	if (!err)
1453 		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1454 					 *instance);
1455 
1456 	hci_dev_unlock(hdev);
1457 
1458 	return rp->status;
1459 }
1460 
1461 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1462 				   struct sk_buff *skb)
1463 {
1464 	struct hci_ev_status *rp = data;
1465 	struct adv_info *adv, *n;
1466 	int err;
1467 
1468 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1469 
1470 	if (rp->status)
1471 		return rp->status;
1472 
1473 	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1474 		return rp->status;
1475 
1476 	hci_dev_lock(hdev);
1477 
1478 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1479 		u8 instance = adv->instance;
1480 
1481 		err = hci_remove_adv_instance(hdev, instance);
1482 		if (!err)
1483 			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1484 						 hdev, instance);
1485 	}
1486 
1487 	hci_dev_unlock(hdev);
1488 
1489 	return rp->status;
1490 }
1491 
1492 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1493 					struct sk_buff *skb)
1494 {
1495 	struct hci_rp_le_read_transmit_power *rp = data;
1496 
1497 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1498 
1499 	if (rp->status)
1500 		return rp->status;
1501 
1502 	hdev->min_le_tx_power = rp->min_le_tx_power;
1503 	hdev->max_le_tx_power = rp->max_le_tx_power;
1504 
1505 	return rp->status;
1506 }
1507 
1508 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1509 				     struct sk_buff *skb)
1510 {
1511 	struct hci_ev_status *rp = data;
1512 	struct hci_cp_le_set_privacy_mode *cp;
1513 	struct hci_conn_params *params;
1514 
1515 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1516 
1517 	if (rp->status)
1518 		return rp->status;
1519 
1520 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1521 	if (!cp)
1522 		return rp->status;
1523 
1524 	hci_dev_lock(hdev);
1525 
1526 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1527 	if (params)
1528 		WRITE_ONCE(params->privacy_mode, cp->mode);
1529 
1530 	hci_dev_unlock(hdev);
1531 
1532 	return rp->status;
1533 }
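
/* Editor's note: the WRITE_ONCE() above is assumed to pair with lockless
 * READ_ONCE() readers of params->privacy_mode elsewhere; the store itself
 * is still serialized by hci_dev_lock()/hci_dev_unlock().
 */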
1534 
1535 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1536 				   struct sk_buff *skb)
1537 {
1538 	struct hci_ev_status *rp = data;
1539 	__u8 *sent;
1540 
1541 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1542 
1543 	if (rp->status)
1544 		return rp->status;
1545 
1546 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1547 	if (!sent)
1548 		return rp->status;
1549 
1550 	hci_dev_lock(hdev);
1551 
1552 	/* If we're doing connection initiation as a peripheral, set a
1553 	 * timeout in case something goes wrong.
1554 	 */
1555 	if (*sent) {
1556 		struct hci_conn *conn;
1557 
1558 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1559 
1560 		conn = hci_lookup_le_connect(hdev);
1561 		if (conn)
1562 			queue_delayed_work(hdev->workqueue,
1563 					   &conn->le_conn_timeout,
1564 					   conn->conn_timeout);
1565 	} else {
1566 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1567 	}
1568 
1569 	hci_dev_unlock(hdev);
1570 
1571 	return rp->status;
1572 }
1573 
1574 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1575 				       struct sk_buff *skb)
1576 {
1577 	struct hci_cp_le_set_ext_adv_enable *cp;
1578 	struct hci_cp_ext_adv_set *set;
1579 	struct adv_info *adv = NULL, *n;
1580 	struct hci_ev_status *rp = data;
1581 
1582 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1583 
1584 	if (rp->status)
1585 		return rp->status;
1586 
1587 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1588 	if (!cp)
1589 		return rp->status;
1590 
1591 	set = (void *)cp->data;
1592 
1593 	hci_dev_lock(hdev);
1594 
1595 	if (cp->num_of_sets)
1596 		adv = hci_find_adv_instance(hdev, set->handle);
1597 
1598 	if (cp->enable) {
1599 		struct hci_conn *conn;
1600 
1601 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1602 
1603 		if (adv && !adv->periodic)
1604 			adv->enabled = true;
1605 
1606 		conn = hci_lookup_le_connect(hdev);
1607 		if (conn)
1608 			queue_delayed_work(hdev->workqueue,
1609 					   &conn->le_conn_timeout,
1610 					   conn->conn_timeout);
1611 	} else {
1612 		if (cp->num_of_sets) {
1613 			if (adv)
1614 				adv->enabled = false;
1615 
1616 			/* If just one instance was disabled, check if any other
1617 			 * instance is still enabled before clearing HCI_LE_ADV.
1618 			 */
1619 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1620 						 list) {
1621 				if (adv->enabled)
1622 					goto unlock;
1623 			}
1624 		} else {
1625 			/* All instances shall be considered disabled */
1626 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1627 						 list)
1628 				adv->enabled = false;
1629 		}
1630 
1631 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1632 	}
1633 
1634 unlock:
1635 	hci_dev_unlock(hdev);
1636 	return rp->status;
1637 }
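
/* Layout sketch (editor's note): Set Extended Advertising Enable is a
 * variable-length command; cp->data carries cp->num_of_sets entries of
 * struct hci_cp_ext_adv_set (handle, duration, max_events), which is why
 * the code above looks up the adv instance from the first entry's handle.
 */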
1638 
1639 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1640 				   struct sk_buff *skb)
1641 {
1642 	struct hci_cp_le_set_scan_param *cp;
1643 	struct hci_ev_status *rp = data;
1644 
1645 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1646 
1647 	if (rp->status)
1648 		return rp->status;
1649 
1650 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1651 	if (!cp)
1652 		return rp->status;
1653 
1654 	hci_dev_lock(hdev);
1655 
1656 	hdev->le_scan_type = cp->type;
1657 
1658 	hci_dev_unlock(hdev);
1659 
1660 	return rp->status;
1661 }
1662 
1663 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1664 				       struct sk_buff *skb)
1665 {
1666 	struct hci_cp_le_set_ext_scan_params *cp;
1667 	struct hci_ev_status *rp = data;
1668 	struct hci_cp_le_scan_phy_params *phy_param;
1669 
1670 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1671 
1672 	if (rp->status)
1673 		return rp->status;
1674 
1675 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1676 	if (!cp)
1677 		return rp->status;
1678 
1679 	phy_param = (void *)cp->data;
1680 
1681 	hci_dev_lock(hdev);
1682 
1683 	hdev->le_scan_type = phy_param->type;
1684 
1685 	hci_dev_unlock(hdev);
1686 
1687 	return rp->status;
1688 }
1689 
1690 static bool has_pending_adv_report(struct hci_dev *hdev)
1691 {
1692 	struct discovery_state *d = &hdev->discovery;
1693 
1694 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1695 }
1696 
1697 static void clear_pending_adv_report(struct hci_dev *hdev)
1698 {
1699 	struct discovery_state *d = &hdev->discovery;
1700 
1701 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1702 	d->last_adv_data_len = 0;
1703 }
1704 
1705 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1706 				     u8 bdaddr_type, s8 rssi, u32 flags,
1707 				     u8 *data, u8 len)
1708 {
1709 	struct discovery_state *d = &hdev->discovery;
1710 
1711 	if (len > max_adv_len(hdev))
1712 		return;
1713 
1714 	bacpy(&d->last_adv_addr, bdaddr);
1715 	d->last_adv_addr_type = bdaddr_type;
1716 	d->last_adv_rssi = rssi;
1717 	d->last_adv_flags = flags;
1718 	memcpy(d->last_adv_data, data, len);
1719 	d->last_adv_data_len = len;
1720 }
1721 
1722 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1723 {
1724 	hci_dev_lock(hdev);
1725 
1726 	switch (enable) {
1727 	case LE_SCAN_ENABLE:
1728 		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1729 		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1730 			clear_pending_adv_report(hdev);
1731 		if (hci_dev_test_flag(hdev, HCI_MESH))
1732 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1733 		break;
1734 
1735 	case LE_SCAN_DISABLE:
1736 		/* We do this here instead of when setting DISCOVERY_STOPPED
1737 		 * since the latter would potentially require waiting for
1738 		 * inquiry to stop too.
1739 		 */
1740 		if (has_pending_adv_report(hdev)) {
1741 			struct discovery_state *d = &hdev->discovery;
1742 
1743 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1744 					  d->last_adv_addr_type, NULL,
1745 					  d->last_adv_rssi, d->last_adv_flags,
1746 					  d->last_adv_data,
1747 					  d->last_adv_data_len, NULL, 0, 0);
1748 		}
1749 
1750 		/* Cancel this timer so that we don't try to disable scanning
1751 		 * when it's already disabled.
1752 		 */
1753 		cancel_delayed_work(&hdev->le_scan_disable);
1754 
1755 		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1756 
1757 		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1758 		 * interrupted scanning due to a connect request. Therefore
1759 		 * mark discovery as stopped.
1760 		 */
1761 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1762 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1763 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1764 			 hdev->discovery.state == DISCOVERY_FINDING)
1765 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1766 
1767 		break;
1768 
1769 	default:
1770 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1771 			   enable);
1772 		break;
1773 	}
1774 
1775 	hci_dev_unlock(hdev);
1776 }
1777 
1778 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1779 				    struct sk_buff *skb)
1780 {
1781 	struct hci_cp_le_set_scan_enable *cp;
1782 	struct hci_ev_status *rp = data;
1783 
1784 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1785 
1786 	if (rp->status)
1787 		return rp->status;
1788 
1789 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1790 	if (!cp)
1791 		return rp->status;
1792 
1793 	le_set_scan_enable_complete(hdev, cp->enable);
1794 
1795 	return rp->status;
1796 }
1797 
1798 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1799 					struct sk_buff *skb)
1800 {
1801 	struct hci_cp_le_set_ext_scan_enable *cp;
1802 	struct hci_ev_status *rp = data;
1803 
1804 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1805 
1806 	if (rp->status)
1807 		return rp->status;
1808 
1809 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1810 	if (!cp)
1811 		return rp->status;
1812 
1813 	le_set_scan_enable_complete(hdev, cp->enable);
1814 
1815 	return rp->status;
1816 }
1817 
1818 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1819 				      struct sk_buff *skb)
1820 {
1821 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1822 
1823 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1824 		   rp->num_of_sets);
1825 
1826 	if (rp->status)
1827 		return rp->status;
1828 
1829 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1830 
1831 	return rp->status;
1832 }
1833 
1834 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1835 					  struct sk_buff *skb)
1836 {
1837 	struct hci_rp_le_read_accept_list_size *rp = data;
1838 
1839 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1840 
1841 	if (rp->status)
1842 		return rp->status;
1843 
1844 	hdev->le_accept_list_size = rp->size;
1845 
1846 	return rp->status;
1847 }
1848 
1849 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1850 				      struct sk_buff *skb)
1851 {
1852 	struct hci_ev_status *rp = data;
1853 
1854 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1855 
1856 	if (rp->status)
1857 		return rp->status;
1858 
1859 	hci_dev_lock(hdev);
1860 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1861 	hci_dev_unlock(hdev);
1862 
1863 	return rp->status;
1864 }
1865 
1866 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1867 				       struct sk_buff *skb)
1868 {
1869 	struct hci_cp_le_add_to_accept_list *sent;
1870 	struct hci_ev_status *rp = data;
1871 
1872 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1873 
1874 	if (rp->status)
1875 		return rp->status;
1876 
1877 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1878 	if (!sent)
1879 		return rp->status;
1880 
1881 	hci_dev_lock(hdev);
1882 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1883 			    sent->bdaddr_type);
1884 	hci_dev_unlock(hdev);
1885 
1886 	return rp->status;
1887 }
1888 
1889 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1890 					 struct sk_buff *skb)
1891 {
1892 	struct hci_cp_le_del_from_accept_list *sent;
1893 	struct hci_ev_status *rp = data;
1894 
1895 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1896 
1897 	if (rp->status)
1898 		return rp->status;
1899 
1900 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1901 	if (!sent)
1902 		return rp->status;
1903 
1904 	hci_dev_lock(hdev);
1905 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1906 			    sent->bdaddr_type);
1907 	hci_dev_unlock(hdev);
1908 
1909 	return rp->status;
1910 }
1911 
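/* The clear/add/del handlers above keep hdev->le_accept_list as a
 * host-side mirror of the controller's filter accept list, and only
 * touch the mirror once the controller has acknowledged the change
 * (status == 0), so the two copies cannot diverge on failure. A
 * userspace sketch of that mirror-after-ack ordering; illustrative
 * only, with hypothetical names and a fixed list size of 8.
 */
#if 0
#include <stdbool.h>
#include <string.h>

struct mirror_entry {
	unsigned char addr[6];
	bool used;
};

static struct mirror_entry mirror[8];	/* accept-list-size analogue */

/* Call only after the controller reported success for the add, the
 * same ordering used by hci_cc_le_add_to_accept_list() above.
 */
static bool mirror_add(const unsigned char addr[6])
{
	int i;

	for (i = 0; i < 8; i++) {
		if (!mirror[i].used) {
			memcpy(mirror[i].addr, addr, 6);
			mirror[i].used = true;
			return true;
		}
	}
	return false;	/* mirror full, controller list exhausted too */
}
#endif
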
1912 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1913 					  struct sk_buff *skb)
1914 {
1915 	struct hci_rp_le_read_supported_states *rp = data;
1916 
1917 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1918 
1919 	if (rp->status)
1920 		return rp->status;
1921 
1922 	memcpy(hdev->le_states, rp->le_states, 8);
1923 
1924 	return rp->status;
1925 }
1926 
1927 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1928 				      struct sk_buff *skb)
1929 {
1930 	struct hci_rp_le_read_def_data_len *rp = data;
1931 
1932 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1933 
1934 	if (rp->status)
1935 		return rp->status;
1936 
1937 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1938 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1939 
1940 	return rp->status;
1941 }
1942 
1943 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1944 				       struct sk_buff *skb)
1945 {
1946 	struct hci_cp_le_write_def_data_len *sent;
1947 	struct hci_ev_status *rp = data;
1948 
1949 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1950 
1951 	if (rp->status)
1952 		return rp->status;
1953 
1954 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1955 	if (!sent)
1956 		return rp->status;
1957 
1958 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1959 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1960 
1961 	return rp->status;
1962 }
1963 
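/* Both data-length handlers above run the __le16 wire fields through
 * le16_to_cpu() before storing them, because HCI carries multi-byte
 * values LSB-first regardless of host endianness. A standalone sketch
 * of that conversion; illustrative only, the helper names are
 * hypothetical.
 */
#if 0
#include <stdint.h>

/* HCI multi-byte fields are little-endian on the wire: LSB first */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Byte layout after the status byte: tx_len (2), tx_time (2) */
static void parse_def_data_len(const uint8_t *rp, uint16_t *tx_len,
			       uint16_t *tx_time)
{
	*tx_len  = get_le16(rp + 1);	/* e.g. fb 00 -> 251 octets */
	*tx_time = get_le16(rp + 3);	/* e.g. 48 08 -> 2120 us */
}
#endif
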
1964 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1965 				       struct sk_buff *skb)
1966 {
1967 	struct hci_cp_le_add_to_resolv_list *sent;
1968 	struct hci_ev_status *rp = data;
1969 
1970 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1971 
1972 	if (rp->status)
1973 		return rp->status;
1974 
1975 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1976 	if (!sent)
1977 		return rp->status;
1978 
1979 	hci_dev_lock(hdev);
1980 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1981 				     sent->bdaddr_type, sent->peer_irk,
1982 				     sent->local_irk);
1983 	hci_dev_unlock(hdev);
1984 
1985 	return rp->status;
1986 }
1987 
1988 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1989 					 struct sk_buff *skb)
1990 {
1991 	struct hci_cp_le_del_from_resolv_list *sent;
1992 	struct hci_ev_status *rp = data;
1993 
1994 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1995 
1996 	if (rp->status)
1997 		return rp->status;
1998 
1999 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2000 	if (!sent)
2001 		return rp->status;
2002 
2003 	hci_dev_lock(hdev);
2004 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2005 				     sent->bdaddr_type);
2006 	hci_dev_unlock(hdev);
2007 
2008 	return rp->status;
2009 }
2010 
2011 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2012 				      struct sk_buff *skb)
2013 {
2014 	struct hci_ev_status *rp = data;
2015 
2016 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2017 
2018 	if (rp->status)
2019 		return rp->status;
2020 
2021 	hci_dev_lock(hdev);
2022 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2023 	hci_dev_unlock(hdev);
2024 
2025 	return rp->status;
2026 }
2027 
2028 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2029 					  struct sk_buff *skb)
2030 {
2031 	struct hci_rp_le_read_resolv_list_size *rp = data;
2032 
2033 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2034 
2035 	if (rp->status)
2036 		return rp->status;
2037 
2038 	hdev->le_resolv_list_size = rp->size;
2039 
2040 	return rp->status;
2041 }
2042 
2043 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2044 					       struct sk_buff *skb)
2045 {
2046 	struct hci_ev_status *rp = data;
2047 	__u8 *sent;
2048 
2049 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2050 
2051 	if (rp->status)
2052 		return rp->status;
2053 
2054 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2055 	if (!sent)
2056 		return rp->status;
2057 
2058 	hci_dev_lock(hdev);
2059 
2060 	if (*sent)
2061 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2062 	else
2063 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2064 
2065 	hci_dev_unlock(hdev);
2066 
2067 	return rp->status;
2068 }
2069 
2070 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2071 				      struct sk_buff *skb)
2072 {
2073 	struct hci_rp_le_read_max_data_len *rp = data;
2074 
2075 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2076 
2077 	if (rp->status)
2078 		return rp->status;
2079 
2080 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2081 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2082 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2083 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2084 
2085 	return rp->status;
2086 }
2087 
2088 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2089 					 struct sk_buff *skb)
2090 {
2091 	struct hci_cp_write_le_host_supported *sent;
2092 	struct hci_ev_status *rp = data;
2093 
2094 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2095 
2096 	if (rp->status)
2097 		return rp->status;
2098 
2099 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2100 	if (!sent)
2101 		return rp->status;
2102 
2103 	hci_dev_lock(hdev);
2104 
2105 	if (sent->le) {
2106 		hdev->features[1][0] |= LMP_HOST_LE;
2107 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2108 	} else {
2109 		hdev->features[1][0] &= ~LMP_HOST_LE;
2110 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2111 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2112 	}
2113 
2114 	if (sent->simul)
2115 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2116 	else
2117 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2118 
2119 	hci_dev_unlock(hdev);
2120 
2121 	return rp->status;
2122 }
2123 
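/* The handler above mirrors the written host features into byte 0 of
 * features page 1 with single-bit masks. A sketch of the same masking;
 * illustrative only, the EX_* values are stand-ins rather than the
 * real LMP_HOST_* definitions.
 */
#if 0
#include <stdint.h>

#define EX_HOST_LE	 0x02	/* stand-in for LMP_HOST_LE */
#define EX_HOST_LE_BREDR 0x04	/* stand-in for LMP_HOST_LE_BREDR */

static void mirror_host_features(uint8_t *page1, int le, int simul)
{
	if (le)
		*page1 |= EX_HOST_LE;
	else
		*page1 &= ~EX_HOST_LE;

	if (simul)
		*page1 |= EX_HOST_LE_BREDR;
	else
		*page1 &= ~EX_HOST_LE_BREDR;
}
#endif
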
2124 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2125 			       struct sk_buff *skb)
2126 {
2127 	struct hci_cp_le_set_adv_param *cp;
2128 	struct hci_ev_status *rp = data;
2129 
2130 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2131 
2132 	if (rp->status)
2133 		return rp->status;
2134 
2135 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2136 	if (!cp)
2137 		return rp->status;
2138 
2139 	hci_dev_lock(hdev);
2140 	hdev->adv_addr_type = cp->own_address_type;
2141 	hci_dev_unlock(hdev);
2142 
2143 	return rp->status;
2144 }
2145 
2146 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2147 				   struct sk_buff *skb)
2148 {
2149 	struct hci_rp_le_set_ext_adv_params *rp = data;
2150 	struct hci_cp_le_set_ext_adv_params *cp;
2151 	struct adv_info *adv_instance;
2152 
2153 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2154 
2155 	if (rp->status)
2156 		return rp->status;
2157 
2158 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2159 	if (!cp)
2160 		return rp->status;
2161 
2162 	hci_dev_lock(hdev);
2163 	hdev->adv_addr_type = cp->own_addr_type;
2164 	if (!cp->handle) {
2165 		/* Store in hdev for instance 0 */
2166 		hdev->adv_tx_power = rp->tx_power;
2167 	} else {
2168 		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2169 		if (adv_instance)
2170 			adv_instance->tx_power = rp->tx_power;
2171 	}
2172 	/* Update adv data now that the tx power is known */
2173 	hci_update_adv_data(hdev, cp->handle);
2174 
2175 	hci_dev_unlock(hdev);
2176 
2177 	return rp->status;
2178 }
2179 
2180 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2181 			   struct sk_buff *skb)
2182 {
2183 	struct hci_rp_read_rssi *rp = data;
2184 	struct hci_conn *conn;
2185 
2186 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2187 
2188 	if (rp->status)
2189 		return rp->status;
2190 
2191 	hci_dev_lock(hdev);
2192 
2193 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2194 	if (conn)
2195 		conn->rssi = rp->rssi;
2196 
2197 	hci_dev_unlock(hdev);
2198 
2199 	return rp->status;
2200 }
2201 
2202 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2203 			       struct sk_buff *skb)
2204 {
2205 	struct hci_cp_read_tx_power *sent;
2206 	struct hci_rp_read_tx_power *rp = data;
2207 	struct hci_conn *conn;
2208 
2209 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2210 
2211 	if (rp->status)
2212 		return rp->status;
2213 
2214 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2215 	if (!sent)
2216 		return rp->status;
2217 
2218 	hci_dev_lock(hdev);
2219 
2220 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2221 	if (!conn)
2222 		goto unlock;
2223 
2224 	switch (sent->type) {
2225 	case 0x00:
2226 		conn->tx_power = rp->tx_power;
2227 		break;
2228 	case 0x01:
2229 		conn->max_tx_power = rp->tx_power;
2230 		break;
2231 	}
2232 
2233 unlock:
2234 	hci_dev_unlock(hdev);
2235 	return rp->status;
2236 }
2237 
2238 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2239 				      struct sk_buff *skb)
2240 {
2241 	struct hci_ev_status *rp = data;
2242 	u8 *mode;
2243 
2244 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2245 
2246 	if (rp->status)
2247 		return rp->status;
2248 
2249 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2250 	if (mode)
2251 		hdev->ssp_debug_mode = *mode;
2252 
2253 	return rp->status;
2254 }
2255 
2256 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2257 {
2258 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2259 
2260 	if (status) {
2261 		hci_conn_check_pending(hdev);
2262 		return;
2263 	}
2264 
2265 	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2266 		set_bit(HCI_INQUIRY, &hdev->flags);
2267 }
2268 
2269 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2270 {
2271 	struct hci_cp_create_conn *cp;
2272 	struct hci_conn *conn;
2273 
2274 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2275 
2276 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2277 	if (!cp)
2278 		return;
2279 
2280 	hci_dev_lock(hdev);
2281 
2282 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2283 
2284 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2285 
2286 	if (status) {
2287 		if (conn && conn->state == BT_CONNECT) {
2288 			if (status != 0x0c || conn->attempt > 2) {
2289 				conn->state = BT_CLOSED;
2290 				hci_connect_cfm(conn, status);
2291 				hci_conn_del(conn);
2292 			} else
2293 				conn->state = BT_CONNECT2;
2294 		}
2295 	} else {
2296 		if (!conn) {
2297 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2298 						  HCI_ROLE_MASTER);
2299 			if (IS_ERR(conn))
2300 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2301 		}
2302 	}
2303 
2304 	hci_dev_unlock(hdev);
2305 }
2306 
2307 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2308 {
2309 	struct hci_cp_add_sco *cp;
2310 	struct hci_conn *acl;
2311 	struct hci_link *link;
2312 	__u16 handle;
2313 
2314 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2315 
2316 	if (!status)
2317 		return;
2318 
2319 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2320 	if (!cp)
2321 		return;
2322 
2323 	handle = __le16_to_cpu(cp->handle);
2324 
2325 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2326 
2327 	hci_dev_lock(hdev);
2328 
2329 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2330 	if (acl) {
2331 		link = list_first_entry_or_null(&acl->link_list,
2332 						struct hci_link, list);
2333 		if (link && link->conn) {
2334 			link->conn->state = BT_CLOSED;
2335 
2336 			hci_connect_cfm(link->conn, status);
2337 			hci_conn_del(link->conn);
2338 		}
2339 	}
2340 
2341 	hci_dev_unlock(hdev);
2342 }
2343 
2344 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2345 {
2346 	struct hci_cp_auth_requested *cp;
2347 	struct hci_conn *conn;
2348 
2349 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2350 
2351 	if (!status)
2352 		return;
2353 
2354 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2355 	if (!cp)
2356 		return;
2357 
2358 	hci_dev_lock(hdev);
2359 
2360 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2361 	if (conn) {
2362 		if (conn->state == BT_CONFIG) {
2363 			hci_connect_cfm(conn, status);
2364 			hci_conn_drop(conn);
2365 		}
2366 	}
2367 
2368 	hci_dev_unlock(hdev);
2369 }
2370 
2371 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2372 {
2373 	struct hci_cp_set_conn_encrypt *cp;
2374 	struct hci_conn *conn;
2375 
2376 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2377 
2378 	if (!status)
2379 		return;
2380 
2381 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2382 	if (!cp)
2383 		return;
2384 
2385 	hci_dev_lock(hdev);
2386 
2387 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2388 	if (conn) {
2389 		if (conn->state == BT_CONFIG) {
2390 			hci_connect_cfm(conn, status);
2391 			hci_conn_drop(conn);
2392 		}
2393 	}
2394 
2395 	hci_dev_unlock(hdev);
2396 }
2397 
2398 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2399 				    struct hci_conn *conn)
2400 {
2401 	if (conn->state != BT_CONFIG || !conn->out)
2402 		return 0;
2403 
2404 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2405 		return 0;
2406 
2407 	/* Only request authentication for SSP connections, for non-SSP
2408 	 * devices with sec_level MEDIUM or HIGH, or when MITM protection
2409 	 * is requested.
2410 	 */
2411 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2412 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2413 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2414 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2415 		return 0;
2416 
2417 	return 1;
2418 }
2419 
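/* Ignoring the BT_CONFIG/outgoing and SDP early returns, the predicate
 * above is a gate with three let-throughs: SSP links always
 * authenticate, auth types with bit 0 set (MITM required) always
 * authenticate, and legacy links authenticate when MEDIUM, HIGH or
 * FIPS security is pending. A condensed sketch; illustrative only,
 * the EX_* constants are stand-ins for the BT_SECURITY_* values.
 */
#if 0
enum ex_sec { EX_SDP, EX_LOW, EX_MEDIUM, EX_HIGH, EX_FIPS };

static int ex_needs_auth(int ssp_enabled, unsigned char auth_type,
			 enum ex_sec pending)
{
	if (ssp_enabled)
		return 1;	/* SSP links always authenticate */
	if (auth_type & 0x01)
		return 1;	/* MITM protection requested */
	return pending == EX_MEDIUM || pending == EX_HIGH ||
	       pending == EX_FIPS;
}
#endif
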
2420 static int hci_resolve_name(struct hci_dev *hdev,
2421 				   struct inquiry_entry *e)
2422 {
2423 	struct hci_cp_remote_name_req cp;
2424 
2425 	memset(&cp, 0, sizeof(cp));
2426 
2427 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2428 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2429 	cp.pscan_mode = e->data.pscan_mode;
2430 	cp.clock_offset = e->data.clock_offset;
2431 
2432 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2433 }
2434 
2435 static bool hci_resolve_next_name(struct hci_dev *hdev)
2436 {
2437 	struct discovery_state *discov = &hdev->discovery;
2438 	struct inquiry_entry *e;
2439 
2440 	if (list_empty(&discov->resolve))
2441 		return false;
2442 
2443 	/* We should stop if we have already spent too much time resolving names. */
2444 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2445 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2446 		return false;
2447 	}
2448 
2449 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2450 	if (!e)
2451 		return false;
2452 
2453 	if (hci_resolve_name(hdev, e) == 0) {
2454 		e->name_state = NAME_PENDING;
2455 		return true;
2456 	}
2457 
2458 	return false;
2459 }
2460 
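/* hci_resolve_next_name() drains the resolve queue under a time
 * budget: once jiffies passes discov->name_resolve_timeout it gives up
 * instead of stalling discovery. A userspace analogue of that deadline
 * check; illustrative only, queue_empty() and process_one() are
 * hypothetical.
 */
#if 0
#include <stdbool.h>
#include <time.h>

extern bool queue_empty(void);
extern int process_one(void);

static bool past(const struct timespec *deadline)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > deadline->tv_sec ||
	       (now.tv_sec == deadline->tv_sec &&
		now.tv_nsec >= deadline->tv_nsec);
}

static bool resolve_next(const struct timespec *deadline)
{
	if (queue_empty())
		return false;
	if (past(deadline))	/* budget spent: stop resolving names */
		return false;
	return process_one() == 0;
}
#endif
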
2461 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2462 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2463 {
2464 	struct discovery_state *discov = &hdev->discovery;
2465 	struct inquiry_entry *e;
2466 
2467 	/* Update the mgmt connected state if necessary. Be careful,
2468 	 * however, with conn objects that exist but are not (yet)
2469 	 * connected. Only those in the BT_CONFIG or BT_CONNECTED states
2470 	 * can be considered connected.
2471 	 */
2472 	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2473 		mgmt_device_connected(hdev, conn, name, name_len);
2474 
2475 	if (discov->state == DISCOVERY_STOPPED)
2476 		return;
2477 
2478 	if (discov->state == DISCOVERY_STOPPING)
2479 		goto discov_complete;
2480 
2481 	if (discov->state != DISCOVERY_RESOLVING)
2482 		return;
2483 
2484 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2485 	/* If the device was not found in the list of discovered devices
2486 	 * whose names are pending, there is no need to continue resolving
2487 	 * the next name, as that will be done upon receiving another
2488 	 * Remote Name Request Complete event. */
2489 	if (!e)
2490 		return;
2491 
2492 	list_del(&e->list);
2493 
2494 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2495 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2496 			 name, name_len);
2497 
2498 	if (hci_resolve_next_name(hdev))
2499 		return;
2500 
2501 discov_complete:
2502 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2503 }
2504 
2505 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2506 {
2507 	struct hci_cp_remote_name_req *cp;
2508 	struct hci_conn *conn;
2509 
2510 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2511 
2512 	/* If successful, wait for the remote name request complete event
2513 	 * before checking whether authentication is needed. */
2514 	if (!status)
2515 		return;
2516 
2517 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2518 	if (!cp)
2519 		return;
2520 
2521 	hci_dev_lock(hdev);
2522 
2523 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2524 
2525 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2526 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2527 
2528 	if (!conn)
2529 		goto unlock;
2530 
2531 	if (!hci_outgoing_auth_needed(hdev, conn))
2532 		goto unlock;
2533 
2534 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2535 		struct hci_cp_auth_requested auth_cp;
2536 
2537 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2538 
2539 		auth_cp.handle = __cpu_to_le16(conn->handle);
2540 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2541 			     sizeof(auth_cp), &auth_cp);
2542 	}
2543 
2544 unlock:
2545 	hci_dev_unlock(hdev);
2546 }
2547 
2548 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2549 {
2550 	struct hci_cp_read_remote_features *cp;
2551 	struct hci_conn *conn;
2552 
2553 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2554 
2555 	if (!status)
2556 		return;
2557 
2558 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2559 	if (!cp)
2560 		return;
2561 
2562 	hci_dev_lock(hdev);
2563 
2564 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2565 	if (conn) {
2566 		if (conn->state == BT_CONFIG) {
2567 			hci_connect_cfm(conn, status);
2568 			hci_conn_drop(conn);
2569 		}
2570 	}
2571 
2572 	hci_dev_unlock(hdev);
2573 }
2574 
2575 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2576 {
2577 	struct hci_cp_read_remote_ext_features *cp;
2578 	struct hci_conn *conn;
2579 
2580 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2581 
2582 	if (!status)
2583 		return;
2584 
2585 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2586 	if (!cp)
2587 		return;
2588 
2589 	hci_dev_lock(hdev);
2590 
2591 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2592 	if (conn) {
2593 		if (conn->state == BT_CONFIG) {
2594 			hci_connect_cfm(conn, status);
2595 			hci_conn_drop(conn);
2596 		}
2597 	}
2598 
2599 	hci_dev_unlock(hdev);
2600 }
2601 
2602 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2603 				       __u8 status)
2604 {
2605 	struct hci_conn *acl;
2606 	struct hci_link *link;
2607 
2608 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2609 
2610 	hci_dev_lock(hdev);
2611 
2612 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2613 	if (acl) {
2614 		link = list_first_entry_or_null(&acl->link_list,
2615 						struct hci_link, list);
2616 		if (link && link->conn) {
2617 			link->conn->state = BT_CLOSED;
2618 
2619 			hci_connect_cfm(link->conn, status);
2620 			hci_conn_del(link->conn);
2621 		}
2622 	}
2623 
2624 	hci_dev_unlock(hdev);
2625 }
2626 
2627 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2628 {
2629 	struct hci_cp_setup_sync_conn *cp;
2630 
2631 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2632 
2633 	if (!status)
2634 		return;
2635 
2636 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2637 	if (!cp)
2638 		return;
2639 
2640 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2641 }
2642 
2643 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2644 {
2645 	struct hci_cp_enhanced_setup_sync_conn *cp;
2646 
2647 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2648 
2649 	if (!status)
2650 		return;
2651 
2652 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2653 	if (!cp)
2654 		return;
2655 
2656 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2657 }
2658 
2659 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2660 {
2661 	struct hci_cp_sniff_mode *cp;
2662 	struct hci_conn *conn;
2663 
2664 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2665 
2666 	if (!status)
2667 		return;
2668 
2669 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2670 	if (!cp)
2671 		return;
2672 
2673 	hci_dev_lock(hdev);
2674 
2675 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2676 	if (conn) {
2677 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2678 
2679 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2680 			hci_sco_setup(conn, status);
2681 	}
2682 
2683 	hci_dev_unlock(hdev);
2684 }
2685 
2686 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2687 {
2688 	struct hci_cp_exit_sniff_mode *cp;
2689 	struct hci_conn *conn;
2690 
2691 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2692 
2693 	if (!status)
2694 		return;
2695 
2696 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2697 	if (!cp)
2698 		return;
2699 
2700 	hci_dev_lock(hdev);
2701 
2702 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2703 	if (conn) {
2704 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2705 
2706 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2707 			hci_sco_setup(conn, status);
2708 	}
2709 
2710 	hci_dev_unlock(hdev);
2711 }
2712 
2713 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2714 {
2715 	struct hci_cp_disconnect *cp;
2716 	struct hci_conn_params *params;
2717 	struct hci_conn *conn;
2718 	bool mgmt_conn;
2719 
2720 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2721 
2722 	/* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are
2723 	 * not suspended; otherwise clean up the connection immediately.
2724 	 */
2725 	if (!status && !hdev->suspended)
2726 		return;
2727 
2728 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2729 	if (!cp)
2730 		return;
2731 
2732 	hci_dev_lock(hdev);
2733 
2734 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2735 	if (!conn)
2736 		goto unlock;
2737 
2738 	if (status) {
2739 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2740 				       conn->dst_type, status);
2741 
2742 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2743 			hdev->cur_adv_instance = conn->adv_instance;
2744 			hci_enable_advertising(hdev);
2745 		}
2746 
2747 		/* Inform sockets conn is gone before we delete it */
2748 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2749 
2750 		goto done;
2751 	}
2752 
2753 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2754 
2755 	if (conn->type == ACL_LINK) {
2756 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2757 			hci_remove_link_key(hdev, &conn->dst);
2758 	}
2759 
2760 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2761 	if (params) {
2762 		switch (params->auto_connect) {
2763 		case HCI_AUTO_CONN_LINK_LOSS:
2764 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2765 				break;
2766 			fallthrough;
2767 
2768 		case HCI_AUTO_CONN_DIRECT:
2769 		case HCI_AUTO_CONN_ALWAYS:
2770 			hci_pend_le_list_del_init(params);
2771 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2772 			break;
2773 
2774 		default:
2775 			break;
2776 		}
2777 	}
2778 
2779 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2780 				 cp->reason, mgmt_conn);
2781 
2782 	hci_disconn_cfm(conn, cp->reason);
2783 
2784 done:
2785 	/* If the disconnection failed for any reason, the upper layer
2786 	 * does not retry the disconnection in the current implementation.
2787 	 * Hence, we need to do some basic cleanup here and re-enable
2788 	 * advertising if necessary.
2789 	 */
2790 	hci_conn_del(conn);
2791 unlock:
2792 	hci_dev_unlock(hdev);
2793 }
2794 
2795 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2796 {
2797 	/* When using controller-based address resolution, the new address
2798 	 * types 0x02 and 0x03 are used. These types need to be converted
2799 	 * back into either the public or the random address type.
2800 	 */
2801 	switch (type) {
2802 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2803 		if (resolved)
2804 			*resolved = true;
2805 		return ADDR_LE_DEV_PUBLIC;
2806 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2807 		if (resolved)
2808 			*resolved = true;
2809 		return ADDR_LE_DEV_RANDOM;
2810 	}
2811 
2812 	if (resolved)
2813 		*resolved = false;
2814 	return type;
2815 }
2816 
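/* ev_bdaddr_type() folds the controller-resolved address types back
 * onto the base types, so the rest of the stack only ever sees public
 * (0x00) or random (0x01). A standalone sketch with the raw values
 * spelled out; illustrative only.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint8_t fold_addr_type(uint8_t type, bool *resolved)
{
	switch (type) {
	case 0x02:		/* public identity, RPA was resolved */
		*resolved = true;
		return 0x00;	/* public */
	case 0x03:		/* random identity, RPA was resolved */
		*resolved = true;
		return 0x01;	/* random */
	}

	*resolved = false;
	return type;		/* already a base type */
}
#endif
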
2817 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2818 			      u8 peer_addr_type, u8 own_address_type,
2819 			      u8 filter_policy)
2820 {
2821 	struct hci_conn *conn;
2822 
2823 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2824 				       peer_addr_type);
2825 	if (!conn)
2826 		return;
2827 
2828 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2829 
2830 	/* Store the initiator and responder address information which
2831 	 * is needed for SMP. These values will not change during the
2832 	 * lifetime of the connection.
2833 	 */
2834 	conn->init_addr_type = own_address_type;
2835 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2836 		bacpy(&conn->init_addr, &hdev->random_addr);
2837 	else
2838 		bacpy(&conn->init_addr, &hdev->bdaddr);
2839 
2840 	conn->resp_addr_type = peer_addr_type;
2841 	bacpy(&conn->resp_addr, peer_addr);
2842 }
2843 
2844 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2845 {
2846 	struct hci_cp_le_create_conn *cp;
2847 
2848 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2849 
2850 	/* All connection failure handling is taken care of by the
2851 	 * hci_conn_failed function, which is triggered by the HCI
2852 	 * request completion callbacks used for connecting.
2853 	 */
2854 	if (status)
2855 		return;
2856 
2857 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2858 	if (!cp)
2859 		return;
2860 
2861 	hci_dev_lock(hdev);
2862 
2863 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2864 			  cp->own_address_type, cp->filter_policy);
2865 
2866 	hci_dev_unlock(hdev);
2867 }
2868 
2869 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2870 {
2871 	struct hci_cp_le_ext_create_conn *cp;
2872 
2873 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2874 
2875 	/* All connection failure handling is taken care of by the
2876 	 * hci_conn_failed function, which is triggered by the HCI
2877 	 * request completion callbacks used for connecting.
2878 	 */
2879 	if (status)
2880 		return;
2881 
2882 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2883 	if (!cp)
2884 		return;
2885 
2886 	hci_dev_lock(hdev);
2887 
2888 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2889 			  cp->own_addr_type, cp->filter_policy);
2890 
2891 	hci_dev_unlock(hdev);
2892 }
2893 
2894 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2895 {
2896 	struct hci_cp_le_read_remote_features *cp;
2897 	struct hci_conn *conn;
2898 
2899 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2900 
2901 	if (!status)
2902 		return;
2903 
2904 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2905 	if (!cp)
2906 		return;
2907 
2908 	hci_dev_lock(hdev);
2909 
2910 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2911 	if (conn) {
2912 		if (conn->state == BT_CONFIG) {
2913 			hci_connect_cfm(conn, status);
2914 			hci_conn_drop(conn);
2915 		}
2916 	}
2917 
2918 	hci_dev_unlock(hdev);
2919 }
2920 
2921 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2922 {
2923 	struct hci_cp_le_start_enc *cp;
2924 	struct hci_conn *conn;
2925 
2926 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2927 
2928 	if (!status)
2929 		return;
2930 
2931 	hci_dev_lock(hdev);
2932 
2933 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2934 	if (!cp)
2935 		goto unlock;
2936 
2937 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2938 	if (!conn)
2939 		goto unlock;
2940 
2941 	if (conn->state != BT_CONNECTED)
2942 		goto unlock;
2943 
2944 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2945 	hci_conn_drop(conn);
2946 
2947 unlock:
2948 	hci_dev_unlock(hdev);
2949 }
2950 
2951 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2952 {
2953 	struct hci_cp_switch_role *cp;
2954 	struct hci_conn *conn;
2955 
2956 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2957 
2958 	if (!status)
2959 		return;
2960 
2961 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2962 	if (!cp)
2963 		return;
2964 
2965 	hci_dev_lock(hdev);
2966 
2967 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2968 	if (conn)
2969 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2970 
2971 	hci_dev_unlock(hdev);
2972 }
2973 
2974 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2975 				     struct sk_buff *skb)
2976 {
2977 	struct hci_ev_status *ev = data;
2978 	struct discovery_state *discov = &hdev->discovery;
2979 	struct inquiry_entry *e;
2980 
2981 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2982 
2983 	hci_conn_check_pending(hdev);
2984 
2985 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2986 		return;
2987 
2988 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2989 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2990 
2991 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2992 		return;
2993 
2994 	hci_dev_lock(hdev);
2995 
2996 	if (discov->state != DISCOVERY_FINDING)
2997 		goto unlock;
2998 
2999 	if (list_empty(&discov->resolve)) {
3000 		/* When BR/EDR inquiry is active and no LE scanning is in
3001 		 * progress, then change discovery state to indicate completion.
3002 		 *
3003 		 * When running LE scanning and BR/EDR inquiry simultaneously
3004 		 * and the LE scan already finished, then change the discovery
3005 		 * state to indicate completion.
3006 		 */
3007 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3008 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3009 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3010 		goto unlock;
3011 	}
3012 
3013 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3014 	if (e && hci_resolve_name(hdev, e) == 0) {
3015 		e->name_state = NAME_PENDING;
3016 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3017 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3018 	} else {
3019 		/* When BR/EDR inquiry is active and no LE scanning is in
3020 		 * progress, then change discovery state to indicate completion.
3021 		 *
3022 		 * When running LE scanning and BR/EDR inquiry simultaneously
3023 		 * and the LE scan already finished, then change the discovery
3024 		 * state to indicate completion.
3025 		 */
3026 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3027 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3028 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3029 	}
3030 
3031 unlock:
3032 	hci_dev_unlock(hdev);
3033 }
3034 
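/* After an inquiry completes, the handler above leaves discovery in
 * one of two states: RESOLVING when a remote-name request for a cached
 * entry was actually sent, or STOPPED, unless a simultaneous LE scan
 * is still running. A condensed sketch of that decision; illustrative
 * only, next_discovery_state() is a hypothetical helper.
 */
#if 0
enum ex_discovery { EX_FINDING, EX_RESOLVING, EX_STOPPED };

static enum ex_discovery next_discovery_state(int resolve_sent,
					      int le_scan_active,
					      int simultaneous)
{
	if (resolve_sent)
		return EX_RESOLVING;	/* wait for remote name events */
	if (le_scan_active && simultaneous)
		return EX_FINDING;	/* let the LE scan finish first */
	return EX_STOPPED;
}
#endif
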
3035 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3036 				   struct sk_buff *skb)
3037 {
3038 	struct hci_ev_inquiry_result *ev = edata;
3039 	struct inquiry_data data;
3040 	int i;
3041 
3042 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3043 			     flex_array_size(ev, info, ev->num)))
3044 		return;
3045 
3046 	bt_dev_dbg(hdev, "num %d", ev->num);
3047 
3048 	if (!ev->num)
3049 		return;
3050 
3051 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3052 		return;
3053 
3054 	hci_dev_lock(hdev);
3055 
3056 	for (i = 0; i < ev->num; i++) {
3057 		struct inquiry_info *info = &ev->info[i];
3058 		u32 flags;
3059 
3060 		bacpy(&data.bdaddr, &info->bdaddr);
3061 		data.pscan_rep_mode	= info->pscan_rep_mode;
3062 		data.pscan_period_mode	= info->pscan_period_mode;
3063 		data.pscan_mode		= info->pscan_mode;
3064 		memcpy(data.dev_class, info->dev_class, 3);
3065 		data.clock_offset	= info->clock_offset;
3066 		data.rssi		= HCI_RSSI_INVALID;
3067 		data.ssp_mode		= 0x00;
3068 
3069 		flags = hci_inquiry_cache_update(hdev, &data, false);
3070 
3071 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3072 				  info->dev_class, HCI_RSSI_INVALID,
3073 				  flags, NULL, 0, NULL, 0, 0);
3074 	}
3075 
3076 	hci_dev_unlock(hdev);
3077 }
3078 
3079 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3080 				  struct sk_buff *skb)
3081 {
3082 	struct hci_ev_conn_complete *ev = data;
3083 	struct hci_conn *conn;
3084 	u8 status = ev->status;
3085 
3086 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3087 
3088 	hci_dev_lock(hdev);
3089 
3090 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3091 	if (!conn) {
3092 		/* In case of error status and there is no connection pending
3093 		 * just unlock as there is nothing to cleanup.
3094 		 */
3095 		if (ev->status)
3096 			goto unlock;
3097 
3098 		/* The connection may not exist if it was auto-connected. Check
3099 		 * the BR/EDR allowlist to see if this device is allowed to
3100 		 * auto-connect; if the link is an ACL type, create the
3101 		 * connection object automatically.
3102 		 *
3103 		 * Auto-connect will only occur if the event filter is
3104 		 * programmed with a given address. Right now, the event
3105 		 * filter is only used during suspend.
3106 		 */
3107 		if (ev->link_type == ACL_LINK &&
3108 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3109 						      &ev->bdaddr,
3110 						      BDADDR_BREDR)) {
3111 			conn = hci_conn_add_unset(hdev, ev->link_type,
3112 						  &ev->bdaddr, HCI_ROLE_SLAVE);
3113 			if (IS_ERR(conn)) {
3114 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3115 				goto unlock;
3116 			}
3117 		} else {
3118 			if (ev->link_type != SCO_LINK)
3119 				goto unlock;
3120 
3121 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3122 						       &ev->bdaddr);
3123 			if (!conn)
3124 				goto unlock;
3125 
3126 			conn->type = SCO_LINK;
3127 		}
3128 	}
3129 
3130 	/* The HCI_Connection_Complete event is only sent once per connection.
3131 	 * Processing it more than once per connection can corrupt kernel memory.
3132 	 *
3133 	 * As the connection handle is set here for the first time, it indicates
3134 	 * whether the connection is already set up.
3135 	 */
3136 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3137 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3138 		goto unlock;
3139 	}
3140 
3141 	if (!status) {
3142 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3143 		if (status)
3144 			goto done;
3145 
3146 		if (conn->type == ACL_LINK) {
3147 			conn->state = BT_CONFIG;
3148 			hci_conn_hold(conn);
3149 
3150 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3151 			    !hci_find_link_key(hdev, &ev->bdaddr))
3152 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3153 			else
3154 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3155 		} else
3156 			conn->state = BT_CONNECTED;
3157 
3158 		hci_debugfs_create_conn(conn);
3159 		hci_conn_add_sysfs(conn);
3160 
3161 		if (test_bit(HCI_AUTH, &hdev->flags))
3162 			set_bit(HCI_CONN_AUTH, &conn->flags);
3163 
3164 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3165 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3166 
3167 		/* "Link key request" completed ahead of "connect request" completes */
3168 		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3169 		    ev->link_type == ACL_LINK) {
3170 			struct link_key *key;
3171 			struct hci_cp_read_enc_key_size cp;
3172 
3173 			key = hci_find_link_key(hdev, &ev->bdaddr);
3174 			if (key) {
3175 				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3176 
3177 				if (!read_key_size_capable(hdev)) {
3178 					conn->enc_key_size = HCI_LINK_KEY_SIZE;
3179 				} else {
3180 					cp.handle = cpu_to_le16(conn->handle);
3181 					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3182 							 sizeof(cp), &cp)) {
3183 						bt_dev_err(hdev, "sending read key size failed");
3184 						conn->enc_key_size = HCI_LINK_KEY_SIZE;
3185 					}
3186 				}
3187 
3188 				hci_encrypt_cfm(conn, ev->status);
3189 			}
3190 		}
3191 
3192 		/* Get remote features */
3193 		if (conn->type == ACL_LINK) {
3194 			struct hci_cp_read_remote_features cp;
3195 			cp.handle = ev->handle;
3196 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3197 				     sizeof(cp), &cp);
3198 
3199 			hci_update_scan(hdev);
3200 		}
3201 
3202 		/* Set packet type for incoming connection */
3203 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3204 			struct hci_cp_change_conn_ptype cp;
3205 			cp.handle = ev->handle;
3206 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3207 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3208 				     &cp);
3209 		}
3210 	}
3211 
3212 	if (conn->type == ACL_LINK)
3213 		hci_sco_setup(conn, ev->status);
3214 
3215 done:
3216 	if (status) {
3217 		hci_conn_failed(conn, status);
3218 	} else if (ev->link_type == SCO_LINK) {
3219 		switch (conn->setting & SCO_AIRMODE_MASK) {
3220 		case SCO_AIRMODE_CVSD:
3221 			if (hdev->notify)
3222 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3223 			break;
3224 		}
3225 
3226 		hci_connect_cfm(conn, status);
3227 	}
3228 
3229 unlock:
3230 	hci_dev_unlock(hdev);
3231 
3232 	hci_conn_check_pending(hdev);
3233 }
3234 
3235 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3236 {
3237 	struct hci_cp_reject_conn_req cp;
3238 
3239 	bacpy(&cp.bdaddr, bdaddr);
3240 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3241 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3242 }
3243 
3244 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3245 				 struct sk_buff *skb)
3246 {
3247 	struct hci_ev_conn_request *ev = data;
3248 	int mask = hdev->link_mode;
3249 	struct inquiry_entry *ie;
3250 	struct hci_conn *conn;
3251 	__u8 flags = 0;
3252 
3253 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3254 
3255 	/* Reject an incoming connection from a device with the same BD_ADDR,
3256 	 * protecting against CVE-2020-26555.
3257 	 */
3258 	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3259 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3260 			   &ev->bdaddr);
3261 		hci_reject_conn(hdev, &ev->bdaddr);
3262 		return;
3263 	}
3264 
3265 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3266 				      &flags);
3267 
3268 	if (!(mask & HCI_LM_ACCEPT)) {
3269 		hci_reject_conn(hdev, &ev->bdaddr);
3270 		return;
3271 	}
3272 
3273 	hci_dev_lock(hdev);
3274 
3275 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3276 				   BDADDR_BREDR)) {
3277 		hci_reject_conn(hdev, &ev->bdaddr);
3278 		goto unlock;
3279 	}
3280 
3281 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3282 	 * connection. These features are only touched through mgmt, so
3283 	 * only do the checks if HCI_MGMT is set.
3284 	 */
3285 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3286 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3287 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3288 					       BDADDR_BREDR)) {
3289 		hci_reject_conn(hdev, &ev->bdaddr);
3290 		goto unlock;
3291 	}
3292 
3293 	/* Connection accepted */
3294 
3295 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3296 	if (ie)
3297 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3298 
3299 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3300 			&ev->bdaddr);
3301 	if (!conn) {
3302 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3303 					  HCI_ROLE_SLAVE);
3304 		if (IS_ERR(conn)) {
3305 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3306 			goto unlock;
3307 		}
3308 	}
3309 
3310 	memcpy(conn->dev_class, ev->dev_class, 3);
3311 
3312 	hci_dev_unlock(hdev);
3313 
3314 	if (ev->link_type == ACL_LINK ||
3315 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3316 		struct hci_cp_accept_conn_req cp;
3317 		conn->state = BT_CONNECT;
3318 
3319 		bacpy(&cp.bdaddr, &ev->bdaddr);
3320 
3321 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3322 			cp.role = 0x00; /* Become central */
3323 		else
3324 			cp.role = 0x01; /* Remain peripheral */
3325 
3326 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3327 	} else if (!(flags & HCI_PROTO_DEFER)) {
3328 		struct hci_cp_accept_sync_conn_req cp;
3329 		conn->state = BT_CONNECT;
3330 
3331 		bacpy(&cp.bdaddr, &ev->bdaddr);
3332 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3333 
3334 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3335 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3336 		cp.max_latency    = cpu_to_le16(0xffff);
3337 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3338 		cp.retrans_effort = 0xff;
3339 
3340 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3341 			     &cp);
3342 	} else {
3343 		conn->state = BT_CONNECT2;
3344 		hci_connect_cfm(conn, 0);
3345 	}
3346 
3347 	return;
3348 unlock:
3349 	hci_dev_unlock(hdev);
3350 }
3351 
3352 static u8 hci_to_mgmt_reason(u8 err)
3353 {
3354 	switch (err) {
3355 	case HCI_ERROR_CONNECTION_TIMEOUT:
3356 		return MGMT_DEV_DISCONN_TIMEOUT;
3357 	case HCI_ERROR_REMOTE_USER_TERM:
3358 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3359 	case HCI_ERROR_REMOTE_POWER_OFF:
3360 		return MGMT_DEV_DISCONN_REMOTE;
3361 	case HCI_ERROR_LOCAL_HOST_TERM:
3362 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3363 	default:
3364 		return MGMT_DEV_DISCONN_UNKNOWN;
3365 	}
3366 }
3367 
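/* The mapping above collapses the HCI disconnect reasons onto coarser
 * mgmt categories. A sketch exercising the same mapping with the raw
 * HCI error values the cases refer to (0x08 connection timeout, 0x13
 * remote user termination, 0x14 remote low resources, 0x15 remote
 * power off, 0x16 local host termination); illustrative only.
 */
#if 0
#include <stdio.h>

static const char *ex_mgmt_reason(unsigned char hci_err)
{
	switch (hci_err) {
	case 0x08:
		return "timeout";
	case 0x13:
	case 0x14:
	case 0x15:
		return "remote";
	case 0x16:
		return "local-host";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("0x13 -> %s\n", ex_mgmt_reason(0x13));	/* remote */
	printf("0x3e -> %s\n", ex_mgmt_reason(0x3e));	/* unknown */
	return 0;
}
#endif
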
3368 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3369 				     struct sk_buff *skb)
3370 {
3371 	struct hci_ev_disconn_complete *ev = data;
3372 	u8 reason;
3373 	struct hci_conn_params *params;
3374 	struct hci_conn *conn;
3375 	bool mgmt_connected;
3376 
3377 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3378 
3379 	hci_dev_lock(hdev);
3380 
3381 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3382 	if (!conn)
3383 		goto unlock;
3384 
3385 	if (ev->status) {
3386 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3387 				       conn->dst_type, ev->status);
3388 		goto unlock;
3389 	}
3390 
3391 	conn->state = BT_CLOSED;
3392 
3393 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3394 
3395 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3396 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3397 	else
3398 		reason = hci_to_mgmt_reason(ev->reason);
3399 
3400 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3401 				reason, mgmt_connected);
3402 
3403 	if (conn->type == ACL_LINK) {
3404 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3405 			hci_remove_link_key(hdev, &conn->dst);
3406 
3407 		hci_update_scan(hdev);
3408 	}
3409 
3410 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3411 	if (params) {
3412 		switch (params->auto_connect) {
3413 		case HCI_AUTO_CONN_LINK_LOSS:
3414 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3415 				break;
3416 			fallthrough;
3417 
3418 		case HCI_AUTO_CONN_DIRECT:
3419 		case HCI_AUTO_CONN_ALWAYS:
3420 			hci_pend_le_list_del_init(params);
3421 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3422 			hci_update_passive_scan(hdev);
3423 			break;
3424 
3425 		default:
3426 			break;
3427 		}
3428 	}
3429 
3430 	hci_disconn_cfm(conn, ev->reason);
3431 
3432 	/* Re-enable advertising if necessary, since it might
3433 	 * have been disabled by the connection. From the
3434 	 * HCI_LE_Set_Advertise_Enable command description in
3435 	 * the core specification (v4.0):
3436 	 * "The Controller shall continue advertising until the Host
3437 	 * issues an LE_Set_Advertise_Enable command with
3438 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3439 	 * or until a connection is created or until the Advertising
3440 	 * is timed out due to Directed Advertising."
3441 	 */
3442 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3443 		hdev->cur_adv_instance = conn->adv_instance;
3444 		hci_enable_advertising(hdev);
3445 	}
3446 
3447 	hci_conn_del(conn);
3448 
3449 unlock:
3450 	hci_dev_unlock(hdev);
3451 }
3452 
3453 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3454 				  struct sk_buff *skb)
3455 {
3456 	struct hci_ev_auth_complete *ev = data;
3457 	struct hci_conn *conn;
3458 
3459 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3460 
3461 	hci_dev_lock(hdev);
3462 
3463 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3464 	if (!conn)
3465 		goto unlock;
3466 
3467 	if (!ev->status) {
3468 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3469 		set_bit(HCI_CONN_AUTH, &conn->flags);
3470 		conn->sec_level = conn->pending_sec_level;
3471 	} else {
3472 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3473 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3474 
3475 		mgmt_auth_failed(conn, ev->status);
3476 	}
3477 
3478 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3479 
3480 	if (conn->state == BT_CONFIG) {
3481 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3482 			struct hci_cp_set_conn_encrypt cp;
3483 			cp.handle  = ev->handle;
3484 			cp.encrypt = 0x01;
3485 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3486 				     &cp);
3487 		} else {
3488 			conn->state = BT_CONNECTED;
3489 			hci_connect_cfm(conn, ev->status);
3490 			hci_conn_drop(conn);
3491 		}
3492 	} else {
3493 		hci_auth_cfm(conn, ev->status);
3494 
3495 		hci_conn_hold(conn);
3496 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3497 		hci_conn_drop(conn);
3498 	}
3499 
3500 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3501 		if (!ev->status) {
3502 			struct hci_cp_set_conn_encrypt cp;
3503 			cp.handle  = ev->handle;
3504 			cp.encrypt = 0x01;
3505 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3506 				     &cp);
3507 		} else {
3508 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3509 			hci_encrypt_cfm(conn, ev->status);
3510 		}
3511 	}
3512 
3513 unlock:
3514 	hci_dev_unlock(hdev);
3515 }
3516 
3517 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3518 				struct sk_buff *skb)
3519 {
3520 	struct hci_ev_remote_name *ev = data;
3521 	struct hci_conn *conn;
3522 
3523 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3524 
3525 	hci_dev_lock(hdev);
3526 
3527 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3528 
3529 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3530 		goto check_auth;
3531 
3532 	if (ev->status == 0)
3533 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3534 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3535 	else
3536 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3537 
3538 check_auth:
3539 	if (!conn)
3540 		goto unlock;
3541 
3542 	if (!hci_outgoing_auth_needed(hdev, conn))
3543 		goto unlock;
3544 
3545 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3546 		struct hci_cp_auth_requested cp;
3547 
3548 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3549 
3550 		cp.handle = __cpu_to_le16(conn->handle);
3551 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3552 	}
3553 
3554 unlock:
3555 	hci_dev_unlock(hdev);
3556 }
3557 
3558 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3559 				   struct sk_buff *skb)
3560 {
3561 	struct hci_ev_encrypt_change *ev = data;
3562 	struct hci_conn *conn;
3563 
3564 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3565 
3566 	hci_dev_lock(hdev);
3567 
3568 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3569 	if (!conn)
3570 		goto unlock;
3571 
3572 	if (!ev->status) {
3573 		if (ev->encrypt) {
3574 			/* Encryption implies authentication */
3575 			set_bit(HCI_CONN_AUTH, &conn->flags);
3576 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3577 			conn->sec_level = conn->pending_sec_level;
3578 
3579 			/* P-256 authentication key implies FIPS */
3580 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3581 				set_bit(HCI_CONN_FIPS, &conn->flags);
3582 
3583 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3584 			    conn->type == LE_LINK)
3585 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3586 		} else {
3587 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3588 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3589 		}
3590 	}
3591 
3592 	/* We should disregard the current RPA and generate a new one
3593 	 * whenever the encryption procedure fails.
3594 	 */
3595 	if (ev->status && conn->type == LE_LINK) {
3596 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3597 		hci_adv_instances_set_rpa_expired(hdev, true);
3598 	}
3599 
3600 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3601 
3602 	/* Check link security requirements are met */
3603 	if (!hci_conn_check_link_mode(conn))
3604 		ev->status = HCI_ERROR_AUTH_FAILURE;
3605 
3606 	if (ev->status && conn->state == BT_CONNECTED) {
3607 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3608 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3609 
3610 		/* Notify upper layers so they can clean up before
3611 		 * disconnecting.
3612 		 */
3613 		hci_encrypt_cfm(conn, ev->status);
3614 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3615 		hci_conn_drop(conn);
3616 		goto unlock;
3617 	}
3618 
3619 	/* Try reading the encryption key size for encrypted ACL links */
3620 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3621 		struct hci_cp_read_enc_key_size cp;
3622 
3623 		/* Only send HCI_Read_Encryption_Key_Size if the
3624 		 * controller really supports it. If it doesn't, assume
3625 		 * the default size (16).
3626 		 */
3627 		if (!read_key_size_capable(hdev)) {
3628 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3629 			goto notify;
3630 		}
3631 
3632 		cp.handle = cpu_to_le16(conn->handle);
3633 		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3634 				 sizeof(cp), &cp)) {
3635 			bt_dev_err(hdev, "sending read key size failed");
3636 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3637 			goto notify;
3638 		}
3639 
3640 		goto unlock;
3641 	}
3642 
3643 	/* Set the default Authenticated Payload Timeout after
3644 	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B,
3645 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3646 	 * sent when the link is active and encryption is enabled; the conn
3647 	 * type can be either LE or ACL, and the controller must support
3648 	 * LMP Ping. AES-CCM encryption is required as well.
3649 	 */
3650 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3651 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3652 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3653 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3654 		struct hci_cp_write_auth_payload_to cp;
3655 
3656 		cp.handle = cpu_to_le16(conn->handle);
3657 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3658 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3659 				 sizeof(cp), &cp))
3660 			bt_dev_err(hdev, "write auth payload timeout failed");
3661 	}
3662 
3663 notify:
3664 	hci_encrypt_cfm(conn, ev->status);
3665 
3666 unlock:
3667 	hci_dev_unlock(hdev);
3668 }
3669 
3670 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3671 					     struct sk_buff *skb)
3672 {
3673 	struct hci_ev_change_link_key_complete *ev = data;
3674 	struct hci_conn *conn;
3675 
3676 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3677 
3678 	hci_dev_lock(hdev);
3679 
3680 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3681 	if (conn) {
3682 		if (!ev->status)
3683 			set_bit(HCI_CONN_SECURE, &conn->flags);
3684 
3685 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3686 
3687 		hci_key_change_cfm(conn, ev->status);
3688 	}
3689 
3690 	hci_dev_unlock(hdev);
3691 }
3692 
3693 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3694 				    struct sk_buff *skb)
3695 {
3696 	struct hci_ev_remote_features *ev = data;
3697 	struct hci_conn *conn;
3698 
3699 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3700 
3701 	hci_dev_lock(hdev);
3702 
3703 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3704 	if (!conn)
3705 		goto unlock;
3706 
3707 	if (!ev->status)
3708 		memcpy(conn->features[0], ev->features, 8);
3709 
3710 	if (conn->state != BT_CONFIG)
3711 		goto unlock;
3712 
3713 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3714 	    lmp_ext_feat_capable(conn)) {
3715 		struct hci_cp_read_remote_ext_features cp;
3716 		cp.handle = ev->handle;
3717 		cp.page = 0x01;
3718 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3719 			     sizeof(cp), &cp);
3720 		goto unlock;
3721 	}
3722 
3723 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3724 		struct hci_cp_remote_name_req cp;
3725 		memset(&cp, 0, sizeof(cp));
3726 		bacpy(&cp.bdaddr, &conn->dst);
3727 		cp.pscan_rep_mode = 0x02;
3728 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3729 	} else {
3730 		mgmt_device_connected(hdev, conn, NULL, 0);
3731 	}
3732 
3733 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3734 		conn->state = BT_CONNECTED;
3735 		hci_connect_cfm(conn, ev->status);
3736 		hci_conn_drop(conn);
3737 	}
3738 
3739 unlock:
3740 	hci_dev_unlock(hdev);
3741 }
3742 
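/* Update HCI command flow control from the Num_HCI_Command_Packets field
 * (ncmd) carried in Command Complete/Command Status events: a non-zero
 * ncmd re-opens the command queue, while a zero ncmd arms the ncmd
 * watchdog timer so a controller that never signals readiness again
 * cannot stall command processing indefinitely.
 */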
3743 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3744 {
3745 	cancel_delayed_work(&hdev->cmd_timer);
3746 
3747 	rcu_read_lock();
3748 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3749 		if (ncmd) {
3750 			cancel_delayed_work(&hdev->ncmd_timer);
3751 			atomic_set(&hdev->cmd_cnt, 1);
3752 		} else {
3753 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3754 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3755 						   HCI_NCMD_TIMEOUT);
3756 		}
3757 	}
3758 	rcu_read_unlock();
3759 }
3760 
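/* LE Read Buffer Size v2 reports the LE ACL buffer pool and, in addition
 * to v1, the ISO buffer pool. Cache both MTU/packet counts and treat an
 * LE MTU below the spec minimum as invalid parameters.
 */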
3761 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3762 					struct sk_buff *skb)
3763 {
3764 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3765 
3766 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3767 
3768 	if (rp->status)
3769 		return rp->status;
3770 
3771 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3772 	hdev->le_pkts  = rp->acl_max_pkt;
3773 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3774 	hdev->iso_pkts = rp->iso_max_pkt;
3775 
3776 	hdev->le_cnt  = hdev->le_pkts;
3777 	hdev->iso_cnt = hdev->iso_pkts;
3778 
3779 	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3780 	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3781 
3782 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3783 		return HCI_ERROR_INVALID_PARAMETERS;
3784 
3785 	return rp->status;
3786 }
3787 
3788 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3789 {
3790 	struct hci_conn *conn, *tmp;
3791 
3792 	lockdep_assert_held(&hdev->lock);
3793 
3794 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3795 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3796 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3797 			continue;
3798 
3799 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3800 			hci_conn_failed(conn, status);
3801 	}
3802 }
3803 
3804 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3805 				   struct sk_buff *skb)
3806 {
3807 	struct hci_rp_le_set_cig_params *rp = data;
3808 	struct hci_cp_le_set_cig_params *cp;
3809 	struct hci_conn *conn;
3810 	u8 status = rp->status;
3811 	bool pending = false;
3812 	int i;
3813 
3814 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3815 
3816 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3817 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3818 			    rp->cig_id != cp->cig_id)) {
3819 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3820 		status = HCI_ERROR_UNSPECIFIED;
3821 	}
3822 
3823 	hci_dev_lock(hdev);
3824 
3825 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3826 	 *
3827 	 * If the Status return parameter is non-zero, then the state of the CIG
3828 	 * and its CIS configurations shall not be changed by the command. If
3829 	 * the CIG did not already exist, it shall not be created.
3830 	 */
3831 	if (status) {
3832 		/* Keep current configuration, fail only the unbound CIS */
3833 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3834 		goto unlock;
3835 	}
3836 
3837 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3838 	 *
3839 	 * If the Status return parameter is zero, then the Controller shall
3840 	 * set the Connection_Handle arrayed return parameter to the connection
3841 	 * handle(s) corresponding to the CIS configurations specified in
3842 	 * the CIS_IDs command parameter, in the same order.
3843 	 */
3844 	for (i = 0; i < rp->num_handles; ++i) {
3845 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3846 						cp->cis[i].cis_id);
3847 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3848 			continue;
3849 
3850 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3851 			continue;
3852 
3853 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3854 			continue;
3855 
3856 		if (conn->state == BT_CONNECT)
3857 			pending = true;
3858 	}
3859 
3860 unlock:
3861 	if (pending)
3862 		hci_le_create_cis_pending(hdev);
3863 
3864 	hci_dev_unlock(hdev);
3865 
3866 	return rp->status;
3867 }
3868 
3869 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3870 				   struct sk_buff *skb)
3871 {
3872 	struct hci_rp_le_setup_iso_path *rp = data;
3873 	struct hci_cp_le_setup_iso_path *cp;
3874 	struct hci_conn *conn;
3875 
3876 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3877 
3878 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3879 	if (!cp)
3880 		return rp->status;
3881 
3882 	hci_dev_lock(hdev);
3883 
3884 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3885 	if (!conn)
3886 		goto unlock;
3887 
3888 	if (rp->status) {
3889 		hci_connect_cfm(conn, rp->status);
3890 		hci_conn_del(conn);
3891 		goto unlock;
3892 	}
3893 
3894 	switch (cp->direction) {
3895 	/* Input (Host to Controller) */
3896 	case 0x00:
3897 		/* Only confirm connection if output only */
3898 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3899 			hci_connect_cfm(conn, rp->status);
3900 		break;
3901 	/* Output (Controller to Host) */
3902 	case 0x01:
3903 		/* Confirm connection since conn->iso_qos is always configured
3904 		 * last.
3905 		 */
3906 		hci_connect_cfm(conn, rp->status);
3907 
3908 		/* Notify device connected in case it is a BIG Sync */
3909 		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3910 			mgmt_device_connected(hdev, conn, NULL, 0);
3911 
3912 		break;
3913 	}
3914 
3915 unlock:
3916 	hci_dev_unlock(hdev);
3917 	return rp->status;
3918 }
3919 
3920 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3921 {
3922 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3923 }
3924 
3925 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3926 				   struct sk_buff *skb)
3927 {
3928 	struct hci_ev_status *rp = data;
3929 	struct hci_cp_le_set_per_adv_params *cp;
3930 
3931 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3932 
3933 	if (rp->status)
3934 		return rp->status;
3935 
3936 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3937 	if (!cp)
3938 		return rp->status;
3939 
3940 	/* TODO: set the conn state */
3941 	return rp->status;
3942 }
3943 
3944 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3945 				       struct sk_buff *skb)
3946 {
3947 	struct hci_ev_status *rp = data;
3948 	struct hci_cp_le_set_per_adv_enable *cp;
3949 	struct adv_info *adv = NULL, *n;
3950 	u8 per_adv_cnt = 0;
3951 
3952 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3953 
3954 	if (rp->status)
3955 		return rp->status;
3956 
3957 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3958 	if (!cp)
3959 		return rp->status;
3960 
3961 	hci_dev_lock(hdev);
3962 
3963 	adv = hci_find_adv_instance(hdev, cp->handle);
3964 
3965 	if (cp->enable) {
3966 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3967 
3968 		if (adv)
3969 			adv->enabled = true;
3970 	} else {
3971 		/* If just one instance was disabled, check if there are
3972 		 * any other instances enabled before clearing HCI_LE_PER_ADV.
3973 		 * The current periodic adv instance will be marked as
3974 		 * disabled once extended advertising is also disabled.
3975 		 */
3976 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3977 					 list) {
3978 			if (adv->periodic && adv->enabled)
3979 				per_adv_cnt++;
3980 		}
3981 
3982 		if (per_adv_cnt > 1)
3983 			goto unlock;
3984 
3985 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3986 	}
3987 
3988 unlock:
3989 	hci_dev_unlock(hdev);
3990 
3991 	return rp->status;
3992 }
3993 
3994 #define HCI_CC_VL(_op, _func, _min, _max) \
3995 { \
3996 	.op = _op, \
3997 	.func = _func, \
3998 	.min_len = _min, \
3999 	.max_len = _max, \
4000 }
4001 
4002 #define HCI_CC(_op, _func, _len) \
4003 	HCI_CC_VL(_op, _func, _len, _len)
4004 
4005 #define HCI_CC_STATUS(_op, _func) \
4006 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4007 
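/* Command Complete dispatch table mapping each opcode to its handler and
 * the expected return parameter length. For example, the table entry
 *
 *	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset)
 *
 * expands (via HCI_CC and HCI_CC_VL above) to { .op = HCI_OP_RESET,
 * .func = hci_cc_reset, .min_len = sizeof(struct hci_ev_status),
 * .max_len = sizeof(struct hci_ev_status) }, which allows hci_cc_func()
 * to length-check the payload before calling the handler.
 */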
4008 static const struct hci_cc {
4009 	u16  op;
4010 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4011 	u16  min_len;
4012 	u16  max_len;
4013 } hci_cc_table[] = {
4014 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4015 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4016 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4017 	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4018 		      hci_cc_remote_name_req_cancel),
4019 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4020 	       sizeof(struct hci_rp_role_discovery)),
4021 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4022 	       sizeof(struct hci_rp_read_link_policy)),
4023 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4024 	       sizeof(struct hci_rp_write_link_policy)),
4025 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4026 	       sizeof(struct hci_rp_read_def_link_policy)),
4027 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4028 		      hci_cc_write_def_link_policy),
4029 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4030 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4031 	       sizeof(struct hci_rp_read_stored_link_key)),
4032 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4033 	       sizeof(struct hci_rp_delete_stored_link_key)),
4034 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4035 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4036 	       sizeof(struct hci_rp_read_local_name)),
4037 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4038 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4039 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4040 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4041 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4042 	       sizeof(struct hci_rp_read_class_of_dev)),
4043 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4044 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4045 	       sizeof(struct hci_rp_read_voice_setting)),
4046 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4047 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4048 	       sizeof(struct hci_rp_read_num_supported_iac)),
4049 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4050 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4051 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4052 	       sizeof(struct hci_rp_read_auth_payload_to)),
4053 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4054 	       sizeof(struct hci_rp_write_auth_payload_to)),
4055 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4056 	       sizeof(struct hci_rp_read_local_version)),
4057 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4058 	       sizeof(struct hci_rp_read_local_commands)),
4059 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4060 	       sizeof(struct hci_rp_read_local_features)),
4061 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4062 	       sizeof(struct hci_rp_read_local_ext_features)),
4063 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4064 	       sizeof(struct hci_rp_read_buffer_size)),
4065 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4066 	       sizeof(struct hci_rp_read_bd_addr)),
4067 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4068 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4069 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4070 	       sizeof(struct hci_rp_read_page_scan_activity)),
4071 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4072 		      hci_cc_write_page_scan_activity),
4073 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4074 	       sizeof(struct hci_rp_read_page_scan_type)),
4075 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4076 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4077 	       sizeof(struct hci_rp_read_clock)),
4078 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4079 	       sizeof(struct hci_rp_read_enc_key_size)),
4080 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4081 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4082 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4083 	       hci_cc_read_def_err_data_reporting,
4084 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4085 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4086 		      hci_cc_write_def_err_data_reporting),
4087 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4088 	       sizeof(struct hci_rp_pin_code_reply)),
4089 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4090 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4091 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4092 	       sizeof(struct hci_rp_read_local_oob_data)),
4093 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4094 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4095 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4096 	       sizeof(struct hci_rp_le_read_buffer_size)),
4097 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4098 	       sizeof(struct hci_rp_le_read_local_features)),
4099 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4100 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4101 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4102 	       sizeof(struct hci_rp_user_confirm_reply)),
4103 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4104 	       sizeof(struct hci_rp_user_confirm_reply)),
4105 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4106 	       sizeof(struct hci_rp_user_confirm_reply)),
4107 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4108 	       sizeof(struct hci_rp_user_confirm_reply)),
4109 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4110 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4111 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4112 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4113 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4114 	       hci_cc_le_read_accept_list_size,
4115 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4116 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4117 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4118 		      hci_cc_le_add_to_accept_list),
4119 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4120 		      hci_cc_le_del_from_accept_list),
4121 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4122 	       sizeof(struct hci_rp_le_read_supported_states)),
4123 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4124 	       sizeof(struct hci_rp_le_read_def_data_len)),
4125 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4126 		      hci_cc_le_write_def_data_len),
4127 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4128 		      hci_cc_le_add_to_resolv_list),
4129 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4130 		      hci_cc_le_del_from_resolv_list),
4131 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4132 		      hci_cc_le_clear_resolv_list),
4133 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4134 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4135 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4136 		      hci_cc_le_set_addr_resolution_enable),
4137 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4138 	       sizeof(struct hci_rp_le_read_max_data_len)),
4139 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4140 		      hci_cc_write_le_host_supported),
4141 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4142 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4143 	       sizeof(struct hci_rp_read_rssi)),
4144 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4145 	       sizeof(struct hci_rp_read_tx_power)),
4146 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4147 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4148 		      hci_cc_le_set_ext_scan_param),
4149 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4150 		      hci_cc_le_set_ext_scan_enable),
4151 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4152 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4153 	       hci_cc_le_read_num_adv_sets,
4154 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4155 	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4156 	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4157 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4158 		      hci_cc_le_set_ext_adv_enable),
4159 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4160 		      hci_cc_le_set_adv_set_random_addr),
4161 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4162 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4163 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4164 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4165 		      hci_cc_le_set_per_adv_enable),
4166 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4167 	       sizeof(struct hci_rp_le_read_transmit_power)),
4168 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4169 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4170 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4171 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4172 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4173 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4174 	       sizeof(struct hci_rp_le_setup_iso_path)),
4175 };
4176 
4177 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4178 		      struct sk_buff *skb)
4179 {
4180 	void *data;
4181 
4182 	if (skb->len < cc->min_len) {
4183 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4184 			   cc->op, skb->len, cc->min_len);
4185 		return HCI_ERROR_UNSPECIFIED;
4186 	}
4187 
4188 	/* Just warn if the length is over max_len; it may still be possible
4189 	 * to partially parse the cc, so leave it to the callback to decide
4190 	 * whether that is acceptable.
4191 	 */
4192 	if (skb->len > cc->max_len)
4193 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4194 			    cc->op, skb->len, cc->max_len);
4195 
4196 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4197 	if (!data)
4198 		return HCI_ERROR_UNSPECIFIED;
4199 
4200 	return cc->func(hdev, data, skb);
4201 }
4202 
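/* Handle the HCI Command Complete event: dispatch to the matching handler
 * in hci_cc_table to parse the return parameters and extract the status.
 * For unknown (typically vendor specific) opcodes, byte 0 of the return
 * parameters is assumed to hold the status.
 */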
4203 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4204 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4205 				 hci_req_complete_t *req_complete,
4206 				 hci_req_complete_skb_t *req_complete_skb)
4207 {
4208 	struct hci_ev_cmd_complete *ev = data;
4209 	int i;
4210 
4211 	*opcode = __le16_to_cpu(ev->opcode);
4212 
4213 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4214 
4215 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4216 		if (hci_cc_table[i].op == *opcode) {
4217 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4218 			break;
4219 		}
4220 	}
4221 
4222 	if (i == ARRAY_SIZE(hci_cc_table)) {
4223 		/* Unknown opcode, assume byte 0 contains the status, so
4224 		 * that e.g. __hci_cmd_sync() properly returns errors
4225 		 * for vendor specific commands sent by HCI drivers.
4226 		 * If a vendor doesn't actually follow this convention we may
4227 		 * need to introduce a vendor CC table in order to properly set
4228 		 * the status.
4229 		 */
4230 		*status = skb->data[0];
4231 	}
4232 
4233 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4234 
4235 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4236 			     req_complete_skb);
4237 
4238 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4239 		bt_dev_err(hdev,
4240 			   "unexpected event for opcode 0x%4.4x", *opcode);
4241 		return;
4242 	}
4243 
4244 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4245 		queue_work(hdev->workqueue, &hdev->cmd_work);
4246 }
4247 
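/* Command Status handler for LE Create CIS: a non-zero status means the
 * controller rejected the whole command, so fail and remove every CIS
 * connection referenced by the sent command, then retry any remaining
 * pending CIS creation.
 */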
4248 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4249 {
4250 	struct hci_cp_le_create_cis *cp;
4251 	bool pending = false;
4252 	int i;
4253 
4254 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4255 
4256 	if (!status)
4257 		return;
4258 
4259 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4260 	if (!cp)
4261 		return;
4262 
4263 	hci_dev_lock(hdev);
4264 
4265 	/* Remove connection if command failed */
4266 	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4267 		struct hci_conn *conn;
4268 		u16 handle;
4269 
4270 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4271 
4272 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4273 		if (conn) {
4274 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4275 					       &conn->flags))
4276 				pending = true;
4277 			conn->state = BT_CLOSED;
4278 			hci_connect_cfm(conn, status);
4279 			hci_conn_del(conn);
4280 		}
4281 	}
4282 
4283 	if (pending)
4284 		hci_le_create_cis_pending(hdev);
4285 
4286 	hci_dev_unlock(hdev);
4287 }
4288 
4289 #define HCI_CS(_op, _func) \
4290 { \
4291 	.op = _op, \
4292 	.func = _func, \
4293 }
4294 
4295 static const struct hci_cs {
4296 	u16  op;
4297 	void (*func)(struct hci_dev *hdev, __u8 status);
4298 } hci_cs_table[] = {
4299 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4300 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4301 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4302 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4303 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4304 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4305 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4306 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4307 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4308 	       hci_cs_read_remote_ext_features),
4309 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4310 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4311 	       hci_cs_enhanced_setup_sync_conn),
4312 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4313 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4314 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4315 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4316 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4317 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4318 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4319 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4320 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4321 };
4322 
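/* Handle the HCI Command Status event. Unlike Command Complete, this
 * event carries no return parameters, so the handlers in hci_cs_table
 * receive only the status byte.
 */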
4323 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4324 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4325 			       hci_req_complete_t *req_complete,
4326 			       hci_req_complete_skb_t *req_complete_skb)
4327 {
4328 	struct hci_ev_cmd_status *ev = data;
4329 	int i;
4330 
4331 	*opcode = __le16_to_cpu(ev->opcode);
4332 	*status = ev->status;
4333 
4334 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4335 
4336 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4337 		if (hci_cs_table[i].op == *opcode) {
4338 			hci_cs_table[i].func(hdev, ev->status);
4339 			break;
4340 		}
4341 	}
4342 
4343 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4344 
4345 	/* Indicate request completion if the command failed. Also, if
4346 	 * we're not waiting for a special event and we get a success
4347 	 * command status we should try to flag the request as completed
4348 	 * (since for this kind of command there will not be a command
4349 	 * complete event).
4350 	 */
4351 	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4352 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4353 				     req_complete_skb);
4354 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4355 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4356 				   *opcode);
4357 			return;
4358 		}
4359 	}
4360 
4361 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4362 		queue_work(hdev->workqueue, &hdev->cmd_work);
4363 }
4364 
4365 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4366 				   struct sk_buff *skb)
4367 {
4368 	struct hci_ev_hardware_error *ev = data;
4369 
4370 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4371 
4372 	hdev->hw_error_code = ev->code;
4373 
4374 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4375 }
4376 
4377 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4378 				struct sk_buff *skb)
4379 {
4380 	struct hci_ev_role_change *ev = data;
4381 	struct hci_conn *conn;
4382 
4383 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4384 
4385 	hci_dev_lock(hdev);
4386 
4387 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4388 	if (conn) {
4389 		if (!ev->status)
4390 			conn->role = ev->role;
4391 
4392 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4393 
4394 		hci_role_switch_cfm(conn, ev->status, ev->role);
4395 	}
4396 
4397 	hci_dev_unlock(hdev);
4398 }
4399 
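/* Handle Number Of Completed Packets: return transmit credits to the
 * per-link-type flow control counters (clamped to the controller's
 * advertised limits) and kick tx_work so queued data can be sent. ISO and
 * LE links fall back to the LE or ACL pool when the controller does not
 * advertise dedicated buffers for them.
 */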
4400 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4401 				  struct sk_buff *skb)
4402 {
4403 	struct hci_ev_num_comp_pkts *ev = data;
4404 	int i;
4405 
4406 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4407 			     flex_array_size(ev, handles, ev->num)))
4408 		return;
4409 
4410 	bt_dev_dbg(hdev, "num %d", ev->num);
4411 
4412 	for (i = 0; i < ev->num; i++) {
4413 		struct hci_comp_pkts_info *info = &ev->handles[i];
4414 		struct hci_conn *conn;
4415 		__u16  handle, count;
4416 
4417 		handle = __le16_to_cpu(info->handle);
4418 		count  = __le16_to_cpu(info->count);
4419 
4420 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4421 		if (!conn)
4422 			continue;
4423 
4424 		conn->sent -= count;
4425 
4426 		switch (conn->type) {
4427 		case ACL_LINK:
4428 			hdev->acl_cnt += count;
4429 			if (hdev->acl_cnt > hdev->acl_pkts)
4430 				hdev->acl_cnt = hdev->acl_pkts;
4431 			break;
4432 
4433 		case LE_LINK:
4434 			if (hdev->le_pkts) {
4435 				hdev->le_cnt += count;
4436 				if (hdev->le_cnt > hdev->le_pkts)
4437 					hdev->le_cnt = hdev->le_pkts;
4438 			} else {
4439 				hdev->acl_cnt += count;
4440 				if (hdev->acl_cnt > hdev->acl_pkts)
4441 					hdev->acl_cnt = hdev->acl_pkts;
4442 			}
4443 			break;
4444 
4445 		case SCO_LINK:
4446 			hdev->sco_cnt += count;
4447 			if (hdev->sco_cnt > hdev->sco_pkts)
4448 				hdev->sco_cnt = hdev->sco_pkts;
4449 			break;
4450 
4451 		case ISO_LINK:
4452 			if (hdev->iso_pkts) {
4453 				hdev->iso_cnt += count;
4454 				if (hdev->iso_cnt > hdev->iso_pkts)
4455 					hdev->iso_cnt = hdev->iso_pkts;
4456 			} else if (hdev->le_pkts) {
4457 				hdev->le_cnt += count;
4458 				if (hdev->le_cnt > hdev->le_pkts)
4459 					hdev->le_cnt = hdev->le_pkts;
4460 			} else {
4461 				hdev->acl_cnt += count;
4462 				if (hdev->acl_cnt > hdev->acl_pkts)
4463 					hdev->acl_cnt = hdev->acl_pkts;
4464 			}
4465 			break;
4466 
4467 		default:
4468 			bt_dev_err(hdev, "unknown type %d conn %p",
4469 				   conn->type, conn);
4470 			break;
4471 		}
4472 	}
4473 
4474 	queue_work(hdev->workqueue, &hdev->tx_work);
4475 }
4476 
4477 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4478 				struct sk_buff *skb)
4479 {
4480 	struct hci_ev_mode_change *ev = data;
4481 	struct hci_conn *conn;
4482 
4483 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4484 
4485 	hci_dev_lock(hdev);
4486 
4487 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4488 	if (conn) {
4489 		conn->mode = ev->mode;
4490 
4491 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4492 					&conn->flags)) {
4493 			if (conn->mode == HCI_CM_ACTIVE)
4494 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4495 			else
4496 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4497 		}
4498 
4499 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4500 			hci_sco_setup(conn, ev->status);
4501 	}
4502 
4503 	hci_dev_unlock(hdev);
4504 }
4505 
4506 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4507 				     struct sk_buff *skb)
4508 {
4509 	struct hci_ev_pin_code_req *ev = data;
4510 	struct hci_conn *conn;
4511 
4512 	bt_dev_dbg(hdev, "");
4513 
4514 	hci_dev_lock(hdev);
4515 
4516 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4517 	if (!conn)
4518 		goto unlock;
4519 
4520 	if (conn->state == BT_CONNECTED) {
4521 		hci_conn_hold(conn);
4522 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4523 		hci_conn_drop(conn);
4524 	}
4525 
4526 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4527 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4528 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4529 			     sizeof(ev->bdaddr), &ev->bdaddr);
4530 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4531 		u8 secure;
4532 
4533 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4534 			secure = 1;
4535 		else
4536 			secure = 0;
4537 
4538 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4539 	}
4540 
4541 unlock:
4542 	hci_dev_unlock(hdev);
4543 }
4544 
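/* Record the link key type and PIN length on the connection and derive
 * the pending security level from the key strength: authenticated P-256
 * keys map to FIPS, authenticated P-192 keys and 16-digit PIN combination
 * keys map to high, while unauthenticated keys and shorter PINs map to
 * medium.
 */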
4545 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4546 {
4547 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4548 		return;
4549 
4550 	conn->pin_length = pin_len;
4551 	conn->key_type = key_type;
4552 
4553 	switch (key_type) {
4554 	case HCI_LK_LOCAL_UNIT:
4555 	case HCI_LK_REMOTE_UNIT:
4556 	case HCI_LK_DEBUG_COMBINATION:
4557 		return;
4558 	case HCI_LK_COMBINATION:
4559 		if (pin_len == 16)
4560 			conn->pending_sec_level = BT_SECURITY_HIGH;
4561 		else
4562 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4563 		break;
4564 	case HCI_LK_UNAUTH_COMBINATION_P192:
4565 	case HCI_LK_UNAUTH_COMBINATION_P256:
4566 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4567 		break;
4568 	case HCI_LK_AUTH_COMBINATION_P192:
4569 		conn->pending_sec_level = BT_SECURITY_HIGH;
4570 		break;
4571 	case HCI_LK_AUTH_COMBINATION_P256:
4572 		conn->pending_sec_level = BT_SECURITY_FIPS;
4573 		break;
4574 	}
4575 }
4576 
4577 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4578 				     struct sk_buff *skb)
4579 {
4580 	struct hci_ev_link_key_req *ev = data;
4581 	struct hci_cp_link_key_reply cp;
4582 	struct hci_conn *conn;
4583 	struct link_key *key;
4584 
4585 	bt_dev_dbg(hdev, "");
4586 
4587 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4588 		return;
4589 
4590 	hci_dev_lock(hdev);
4591 
4592 	key = hci_find_link_key(hdev, &ev->bdaddr);
4593 	if (!key) {
4594 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4595 		goto not_found;
4596 	}
4597 
4598 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4599 
4600 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4601 	if (conn) {
4602 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4603 
4604 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4605 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4606 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4607 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4608 			goto not_found;
4609 		}
4610 
4611 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4612 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4613 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4614 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4615 			goto not_found;
4616 		}
4617 
4618 		conn_set_key(conn, key->type, key->pin_len);
4619 	}
4620 
4621 	bacpy(&cp.bdaddr, &ev->bdaddr);
4622 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4623 
4624 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4625 
4626 	hci_dev_unlock(hdev);
4627 
4628 	return;
4629 
4630 not_found:
4631 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4632 	hci_dev_unlock(hdev);
4633 }
4634 
4635 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4636 				    struct sk_buff *skb)
4637 {
4638 	struct hci_ev_link_key_notify *ev = data;
4639 	struct hci_conn *conn;
4640 	struct link_key *key;
4641 	bool persistent;
4642 	u8 pin_len = 0;
4643 
4644 	bt_dev_dbg(hdev, "");
4645 
4646 	hci_dev_lock(hdev);
4647 
4648 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4649 	if (!conn)
4650 		goto unlock;
4651 
4652 	/* Ignore NULL link key against CVE-2020-26555 */
4653 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4654 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4655 			   &ev->bdaddr);
4656 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4657 		hci_conn_drop(conn);
4658 		goto unlock;
4659 	}
4660 
4661 	hci_conn_hold(conn);
4662 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4663 	hci_conn_drop(conn);
4664 
4665 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4666 	conn_set_key(conn, ev->key_type, conn->pin_length);
4667 
4668 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4669 		goto unlock;
4670 
4671 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4672 			        ev->key_type, pin_len, &persistent);
4673 	if (!key)
4674 		goto unlock;
4675 
4676 	/* Update connection information since adding the key will have
4677 	 * fixed up the type in the case of changed combination keys.
4678 	 */
4679 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4680 		conn_set_key(conn, key->type, key->pin_len);
4681 
4682 	mgmt_new_link_key(hdev, key, persistent);
4683 
4684 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4685 	 * is set. If it's not set simply remove the key from the kernel
4686 	 * list (we've still notified user space about it but with
4687 	 * store_hint being 0).
4688 	 */
4689 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4690 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4691 		list_del_rcu(&key->list);
4692 		kfree_rcu(key, rcu);
4693 		goto unlock;
4694 	}
4695 
4696 	if (persistent)
4697 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4698 	else
4699 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4700 
4701 unlock:
4702 	hci_dev_unlock(hdev);
4703 }
4704 
4705 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4706 				 struct sk_buff *skb)
4707 {
4708 	struct hci_ev_clock_offset *ev = data;
4709 	struct hci_conn *conn;
4710 
4711 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4712 
4713 	hci_dev_lock(hdev);
4714 
4715 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4716 	if (conn && !ev->status) {
4717 		struct inquiry_entry *ie;
4718 
4719 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4720 		if (ie) {
4721 			ie->data.clock_offset = ev->clock_offset;
4722 			ie->timestamp = jiffies;
4723 		}
4724 	}
4725 
4726 	hci_dev_unlock(hdev);
4727 }
4728 
4729 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4730 				    struct sk_buff *skb)
4731 {
4732 	struct hci_ev_pkt_type_change *ev = data;
4733 	struct hci_conn *conn;
4734 
4735 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4736 
4737 	hci_dev_lock(hdev);
4738 
4739 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4740 	if (conn && !ev->status)
4741 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4742 
4743 	hci_dev_unlock(hdev);
4744 }
4745 
4746 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4747 				   struct sk_buff *skb)
4748 {
4749 	struct hci_ev_pscan_rep_mode *ev = data;
4750 	struct inquiry_entry *ie;
4751 
4752 	bt_dev_dbg(hdev, "");
4753 
4754 	hci_dev_lock(hdev);
4755 
4756 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4757 	if (ie) {
4758 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4759 		ie->timestamp = jiffies;
4760 	}
4761 
4762 	hci_dev_unlock(hdev);
4763 }
4764 
4765 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4766 					     struct sk_buff *skb)
4767 {
4768 	struct hci_ev_inquiry_result_rssi *ev = edata;
4769 	struct inquiry_data data;
4770 	int i;
4771 
4772 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4773 
4774 	if (!ev->num)
4775 		return;
4776 
4777 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4778 		return;
4779 
4780 	hci_dev_lock(hdev);
4781 
4782 	if (skb->len == array_size(ev->num,
4783 				   sizeof(struct inquiry_info_rssi_pscan))) {
4784 		struct inquiry_info_rssi_pscan *info;
4785 
4786 		for (i = 0; i < ev->num; i++) {
4787 			u32 flags;
4788 
4789 			info = hci_ev_skb_pull(hdev, skb,
4790 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4791 					       sizeof(*info));
4792 			if (!info) {
4793 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4794 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4795 				goto unlock;
4796 			}
4797 
4798 			bacpy(&data.bdaddr, &info->bdaddr);
4799 			data.pscan_rep_mode	= info->pscan_rep_mode;
4800 			data.pscan_period_mode	= info->pscan_period_mode;
4801 			data.pscan_mode		= info->pscan_mode;
4802 			memcpy(data.dev_class, info->dev_class, 3);
4803 			data.clock_offset	= info->clock_offset;
4804 			data.rssi		= info->rssi;
4805 			data.ssp_mode		= 0x00;
4806 
4807 			flags = hci_inquiry_cache_update(hdev, &data, false);
4808 
4809 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4810 					  info->dev_class, info->rssi,
4811 					  flags, NULL, 0, NULL, 0, 0);
4812 		}
4813 	} else if (skb->len == array_size(ev->num,
4814 					  sizeof(struct inquiry_info_rssi))) {
4815 		struct inquiry_info_rssi *info;
4816 
4817 		for (i = 0; i < ev->num; i++) {
4818 			u32 flags;
4819 
4820 			info = hci_ev_skb_pull(hdev, skb,
4821 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4822 					       sizeof(*info));
4823 			if (!info) {
4824 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4825 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4826 				goto unlock;
4827 			}
4828 
4829 			bacpy(&data.bdaddr, &info->bdaddr);
4830 			data.pscan_rep_mode	= info->pscan_rep_mode;
4831 			data.pscan_period_mode	= info->pscan_period_mode;
4832 			data.pscan_mode		= 0x00;
4833 			memcpy(data.dev_class, info->dev_class, 3);
4834 			data.clock_offset	= info->clock_offset;
4835 			data.rssi		= info->rssi;
4836 			data.ssp_mode		= 0x00;
4837 
4838 			flags = hci_inquiry_cache_update(hdev, &data, false);
4839 
4840 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4841 					  info->dev_class, info->rssi,
4842 					  flags, NULL, 0, NULL, 0, 0);
4843 		}
4844 	} else {
4845 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4846 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4847 	}
4848 unlock:
4849 	hci_dev_unlock(hdev);
4850 }
4851 
4852 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4853 					struct sk_buff *skb)
4854 {
4855 	struct hci_ev_remote_ext_features *ev = data;
4856 	struct hci_conn *conn;
4857 
4858 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4859 
4860 	hci_dev_lock(hdev);
4861 
4862 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4863 	if (!conn)
4864 		goto unlock;
4865 
4866 	if (ev->page < HCI_MAX_PAGES)
4867 		memcpy(conn->features[ev->page], ev->features, 8);
4868 
4869 	if (!ev->status && ev->page == 0x01) {
4870 		struct inquiry_entry *ie;
4871 
4872 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4873 		if (ie)
4874 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4875 
4876 		if (ev->features[0] & LMP_HOST_SSP) {
4877 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4878 		} else {
4879 			/* It is mandatory by the Bluetooth specification that
4880 			 * Extended Inquiry Results are only used when Secure
4881 			 * Simple Pairing is enabled, but some devices violate
4882 			 * this.
4883 			 *
4884 			 * To make these devices work, the internal SSP
4885 			 * enabled flag needs to be cleared if the remote host
4886 			 * features do not indicate SSP support. */
4887 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4888 		}
4889 
4890 		if (ev->features[0] & LMP_HOST_SC)
4891 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4892 	}
4893 
4894 	if (conn->state != BT_CONFIG)
4895 		goto unlock;
4896 
4897 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4898 		struct hci_cp_remote_name_req cp;
4899 		memset(&cp, 0, sizeof(cp));
4900 		bacpy(&cp.bdaddr, &conn->dst);
4901 		cp.pscan_rep_mode = 0x02;
4902 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4903 	} else {
4904 		mgmt_device_connected(hdev, conn, NULL, 0);
4905 	}
4906 
4907 	if (!hci_outgoing_auth_needed(hdev, conn)) {
4908 		conn->state = BT_CONNECTED;
4909 		hci_connect_cfm(conn, ev->status);
4910 		hci_conn_drop(conn);
4911 	}
4912 
4913 unlock:
4914 	hci_dev_unlock(hdev);
4915 }
4916 
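/* Handle Synchronous Connection Complete for SCO/eSCO links. For a set
 * of eSCO-specific failure codes the outgoing connection attempt is
 * retried with a downgraded packet type mask before being given up on,
 * and on success the air mode is reported to the driver when SCO data is
 * routed over the HCI transport.
 */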
4917 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4918 				       struct sk_buff *skb)
4919 {
4920 	struct hci_ev_sync_conn_complete *ev = data;
4921 	struct hci_conn *conn;
4922 	u8 status = ev->status;
4923 
4924 	switch (ev->link_type) {
4925 	case SCO_LINK:
4926 	case ESCO_LINK:
4927 		break;
4928 	default:
4929 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4930 		 * for HCI_Synchronous_Connection_Complete is limited to
4931 		 * either SCO or eSCO
4932 		 */
4933 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4934 		return;
4935 	}
4936 
4937 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4938 
4939 	hci_dev_lock(hdev);
4940 
4941 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4942 	if (!conn) {
4943 		if (ev->link_type == ESCO_LINK)
4944 			goto unlock;
4945 
4946 		/* When the link type in the event indicates SCO connection
4947 		 * and lookup of the connection object fails, then check
4948 		 * if an eSCO connection object exists.
4949 		 *
4950 		 * The core limits the synchronous connections to either
4951 		 * SCO or eSCO. The eSCO connection is preferred and tried
4952 		 * to be setup first and until successfully established,
4953 		 * the link type will be hinted as eSCO.
4954 		 */
4955 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4956 		if (!conn)
4957 			goto unlock;
4958 	}
4959 
4960 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4961 	 * Processing it more than once per connection can corrupt kernel memory.
4962 	 *
4963 	 * As the connection handle is set here for the first time, it indicates
4964 	 * whether the connection is already set up.
4965 	 */
4966 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4967 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4968 		goto unlock;
4969 	}
4970 
4971 	switch (status) {
4972 	case 0x00:
4973 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4974 		if (status) {
4975 			conn->state = BT_CLOSED;
4976 			break;
4977 		}
4978 
4979 		conn->state  = BT_CONNECTED;
4980 		conn->type   = ev->link_type;
4981 
4982 		hci_debugfs_create_conn(conn);
4983 		hci_conn_add_sysfs(conn);
4984 		break;
4985 
4986 	case 0x10:	/* Connection Accept Timeout */
4987 	case 0x0d:	/* Connection Rejected due to Limited Resources */
4988 	case 0x11:	/* Unsupported Feature or Parameter Value */
4989 	case 0x1c:	/* SCO interval rejected */
4990 	case 0x1a:	/* Unsupported Remote Feature */
4991 	case 0x1e:	/* Invalid LMP Parameters */
4992 	case 0x1f:	/* Unspecified error */
4993 	case 0x20:	/* Unsupported LMP Parameter value */
4994 		if (conn->out) {
4995 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4996 					(hdev->esco_type & EDR_ESCO_MASK);
4997 			if (hci_setup_sync(conn, conn->parent->handle))
4998 				goto unlock;
4999 		}
5000 		fallthrough;
5001 
5002 	default:
5003 		conn->state = BT_CLOSED;
5004 		break;
5005 	}
5006 
5007 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5008 	/* Notify only for SCO over the HCI transport data path (value zero);
5009 	 * a non-zero value indicates a non-HCI transport data path.
5010 	 */
5011 	if (conn->codec.data_path == 0 && hdev->notify) {
5012 		switch (ev->air_mode) {
5013 		case 0x02:
5014 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5015 			break;
5016 		case 0x03:
5017 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5018 			break;
5019 		}
5020 	}
5021 
5022 	hci_connect_cfm(conn, status);
5023 	if (status)
5024 		hci_conn_del(conn);
5025 
5026 unlock:
5027 	hci_dev_unlock(hdev);
5028 }
5029 
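/* Compute the used length of an EIR data buffer. EIR data is a sequence
 * of length-prefixed structures, so walk the length bytes until a zero
 * length terminator or the end of the buffer is reached.
 */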
5030 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5031 {
5032 	size_t parsed = 0;
5033 
5034 	while (parsed < eir_len) {
5035 		u8 field_len = eir[0];
5036 
5037 		if (field_len == 0)
5038 			return parsed;
5039 
5040 		parsed += field_len + 1;
5041 		eir += field_len + 1;
5042 	}
5043 
5044 	return eir_len;
5045 }
5046 
5047 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5048 					    struct sk_buff *skb)
5049 {
5050 	struct hci_ev_ext_inquiry_result *ev = edata;
5051 	struct inquiry_data data;
5052 	size_t eir_len;
5053 	int i;
5054 
5055 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5056 			     flex_array_size(ev, info, ev->num)))
5057 		return;
5058 
5059 	bt_dev_dbg(hdev, "num %d", ev->num);
5060 
5061 	if (!ev->num)
5062 		return;
5063 
5064 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5065 		return;
5066 
5067 	hci_dev_lock(hdev);
5068 
5069 	for (i = 0; i < ev->num; i++) {
5070 		struct extended_inquiry_info *info = &ev->info[i];
5071 		u32 flags;
5072 		bool name_known;
5073 
5074 		bacpy(&data.bdaddr, &info->bdaddr);
5075 		data.pscan_rep_mode	= info->pscan_rep_mode;
5076 		data.pscan_period_mode	= info->pscan_period_mode;
5077 		data.pscan_mode		= 0x00;
5078 		memcpy(data.dev_class, info->dev_class, 3);
5079 		data.clock_offset	= info->clock_offset;
5080 		data.rssi		= info->rssi;
5081 		data.ssp_mode		= 0x01;
5082 
5083 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5084 			name_known = eir_get_data(info->data,
5085 						  sizeof(info->data),
5086 						  EIR_NAME_COMPLETE, NULL);
5087 		else
5088 			name_known = true;
5089 
5090 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5091 
5092 		eir_len = eir_get_length(info->data, sizeof(info->data));
5093 
5094 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5095 				  info->dev_class, info->rssi,
5096 				  flags, info->data, eir_len, NULL, 0, 0);
5097 	}
5098 
5099 	hci_dev_unlock(hdev);
5100 }
5101 
5102 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5103 					 struct sk_buff *skb)
5104 {
5105 	struct hci_ev_key_refresh_complete *ev = data;
5106 	struct hci_conn *conn;
5107 
5108 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5109 		   __le16_to_cpu(ev->handle));
5110 
5111 	hci_dev_lock(hdev);
5112 
5113 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5114 	if (!conn)
5115 		goto unlock;
5116 
5117 	/* For BR/EDR the necessary steps are taken through the
5118 	 * auth_complete event.
5119 	 */
5120 	if (conn->type != LE_LINK)
5121 		goto unlock;
5122 
5123 	if (!ev->status)
5124 		conn->sec_level = conn->pending_sec_level;
5125 
5126 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5127 
5128 	if (ev->status && conn->state == BT_CONNECTED) {
5129 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5130 		hci_conn_drop(conn);
5131 		goto unlock;
5132 	}
5133 
5134 	if (conn->state == BT_CONFIG) {
5135 		if (!ev->status)
5136 			conn->state = BT_CONNECTED;
5137 
5138 		hci_connect_cfm(conn, ev->status);
5139 		hci_conn_drop(conn);
5140 	} else {
5141 		hci_auth_cfm(conn, ev->status);
5142 
5143 		hci_conn_hold(conn);
5144 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5145 		hci_conn_drop(conn);
5146 	}
5147 
5148 unlock:
5149 	hci_dev_unlock(hdev);
5150 }
5151 
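/* Derive the SSP authentication requirements from the local and remote
 * IO capabilities and bonding preferences; bit 0 of the returned value
 * encodes whether MITM protection is required.
 */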
5152 static u8 hci_get_auth_req(struct hci_conn *conn)
5153 {
5154 	/* If remote requests no-bonding follow that lead */
5155 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5156 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5157 		return conn->remote_auth | (conn->auth_type & 0x01);
5158 
5159 	/* If both remote and local have enough IO capabilities, require
5160 	 * MITM protection
5161 	 */
5162 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5163 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5164 		return conn->remote_auth | 0x01;
5165 
5166 	/* No MITM protection possible so ignore remote requirement */
5167 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5168 }
5169 
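/* Return the OOB Data Present value for an IO Capability Reply. With
 * Secure Connections enabled the stored present value is trusted unless
 * SC Only mode is active, in which case valid P-256 values are required;
 * without Secure Connections only the P-192 values are considered.
 */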
5170 static u8 bredr_oob_data_present(struct hci_conn *conn)
5171 {
5172 	struct hci_dev *hdev = conn->hdev;
5173 	struct oob_data *data;
5174 
5175 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5176 	if (!data)
5177 		return 0x00;
5178 
5179 	if (bredr_sc_enabled(hdev)) {
5180 		/* When Secure Connections is enabled, then just
5181 		 * return the present value stored with the OOB
5182 		 * data. The stored value contains the right present
5183 		 * information. However it can only be trusted when
5184 		 * not in Secure Connection Only mode.
5185 		 */
5186 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5187 			return data->present;
5188 
5189 		/* When Secure Connections Only mode is enabled, then
5190 		 * the P-256 values are required. If they are not
5191 		 * available, then do not declare that OOB data is
5192 		 * present.
5193 		 */
5194 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5195 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5196 			return 0x00;
5197 
5198 		return 0x02;
5199 	}
5200 
5201 	/* When Secure Connections is not enabled or actually
5202 	 * not supported by the hardware, then check whether the
5203 	 * P-192 data values are present.
5204 	 */
5205 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5206 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5207 		return 0x00;
5208 
5209 	return 0x01;
5210 }
5211 
5212 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5213 				    struct sk_buff *skb)
5214 {
5215 	struct hci_ev_io_capa_request *ev = data;
5216 	struct hci_conn *conn;
5217 
5218 	bt_dev_dbg(hdev, "");
5219 
5220 	hci_dev_lock(hdev);
5221 
5222 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5223 	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5224 		goto unlock;
5225 
5226 	/* Assume remote supports SSP since it has triggered this event */
5227 	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5228 
5229 	hci_conn_hold(conn);
5230 
5231 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5232 		goto unlock;
5233 
5234 	/* Allow pairing if we're bondable, if we are the initiator of
5235 	 * the pairing, or if the remote is not requesting bonding.
5236 	 */
5237 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5238 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5239 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5240 		struct hci_cp_io_capability_reply cp;
5241 
5242 		bacpy(&cp.bdaddr, &ev->bdaddr);
5243 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
5244 		 * as KeyboardDisplay is not defined for BR/EDR by the BT spec. */
5245 		cp.capability = (conn->io_capability == 0x04) ?
5246 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5247 
5248 		/* If we are initiators, there is no remote information yet */
5249 		if (conn->remote_auth == 0xff) {
5250 			/* Request MITM protection if our IO caps allow it
5251 			 * except for the no-bonding case.
5252 			 */
5253 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5254 			    conn->auth_type != HCI_AT_NO_BONDING)
5255 				conn->auth_type |= 0x01;
5256 		} else {
5257 			conn->auth_type = hci_get_auth_req(conn);
5258 		}
5259 
5260 		/* If we're not bondable, force one of the non-bondable
5261 		 * authentication requirement values.
5262 		 */
5263 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5264 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5265 
5266 		cp.authentication = conn->auth_type;
5267 		cp.oob_data = bredr_oob_data_present(conn);
5268 
5269 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5270 			     sizeof(cp), &cp);
5271 	} else {
5272 		struct hci_cp_io_capability_neg_reply cp;
5273 
5274 		bacpy(&cp.bdaddr, &ev->bdaddr);
5275 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5276 
5277 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5278 			     sizeof(cp), &cp);
5279 	}
5280 
5281 unlock:
5282 	hci_dev_unlock(hdev);
5283 }
5284 
5285 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5286 				  struct sk_buff *skb)
5287 {
5288 	struct hci_ev_io_capa_reply *ev = data;
5289 	struct hci_conn *conn;
5290 
5291 	bt_dev_dbg(hdev, "");
5292 
5293 	hci_dev_lock(hdev);
5294 
5295 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5296 	if (!conn)
5297 		goto unlock;
5298 
5299 	conn->remote_cap = ev->capability;
5300 	conn->remote_auth = ev->authentication;
5301 
5302 unlock:
5303 	hci_dev_unlock(hdev);
5304 }
5305 
5306 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5307 					 struct sk_buff *skb)
5308 {
5309 	struct hci_ev_user_confirm_req *ev = data;
5310 	int loc_mitm, rem_mitm, confirm_hint = 0;
5311 	struct hci_conn *conn;
5312 
5313 	bt_dev_dbg(hdev, "");
5314 
5315 	hci_dev_lock(hdev);
5316 
5317 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5318 		goto unlock;
5319 
5320 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5321 	if (!conn)
5322 		goto unlock;
5323 
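	/* Bit 0 of the HCI authentication requirements encodes the
	 * MITM protection flag.
	 */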
5324 	loc_mitm = (conn->auth_type & 0x01);
5325 	rem_mitm = (conn->remote_auth & 0x01);
5326 
5327 	/* If we require MITM but the remote device can't provide that
5328 	 * (it has NoInputNoOutput) then reject the confirmation
5329 	 * request. We check the security level here since it doesn't
5330 	 * necessarily match conn->auth_type.
5331 	 */
5332 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5333 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5334 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5335 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5336 			     sizeof(ev->bdaddr), &ev->bdaddr);
5337 		goto unlock;
5338 	}
5339 
5340 	/* If no side requires MITM protection, auto-accept */
5341 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5342 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5343 
5344 		/* If we're not the initiators, request authorization to
5345 		 * proceed from user space (mgmt_user_confirm with
5346 		 * confirm_hint set to 1). The exception is if neither
5347 		 * side had MITM or if the local IO capability is
5348 		 * NoInputNoOutput, in which case we auto-accept.
5349 		 */
5350 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5351 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5352 		    (loc_mitm || rem_mitm)) {
5353 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5354 			confirm_hint = 1;
5355 			goto confirm;
5356 		}
5357 
5358 		/* If a link key already exists in the local host, leave the
5359 		 * decision to user space since the remote device could be
5360 		 * legitimate or malicious.
5361 		 */
5362 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5363 			bt_dev_dbg(hdev, "Local host already has link key");
5364 			confirm_hint = 1;
5365 			goto confirm;
5366 		}
5367 
5368 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5369 		       hdev->auto_accept_delay);
5370 
5371 		if (hdev->auto_accept_delay > 0) {
5372 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5373 			queue_delayed_work(conn->hdev->workqueue,
5374 					   &conn->auto_accept_work, delay);
5375 			goto unlock;
5376 		}
5377 
5378 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5379 			     sizeof(ev->bdaddr), &ev->bdaddr);
5380 		goto unlock;
5381 	}
5382 
5383 confirm:
5384 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5385 				  le32_to_cpu(ev->passkey), confirm_hint);
5386 
5387 unlock:
5388 	hci_dev_unlock(hdev);
5389 }
5390 
5391 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5392 					 struct sk_buff *skb)
5393 {
5394 	struct hci_ev_user_passkey_req *ev = data;
5395 
5396 	bt_dev_dbg(hdev, "");
5397 
5398 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5399 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5400 }
5401 
5402 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5403 					struct sk_buff *skb)
5404 {
5405 	struct hci_ev_user_passkey_notify *ev = data;
5406 	struct hci_conn *conn;
5407 
5408 	bt_dev_dbg(hdev, "");
5409 
5410 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5411 	if (!conn)
5412 		return;
5413 
5414 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5415 	conn->passkey_entered = 0;
5416 
5417 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5418 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5419 					 conn->dst_type, conn->passkey_notify,
5420 					 conn->passkey_entered);
5421 }
5422 
5423 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5424 				    struct sk_buff *skb)
5425 {
5426 	struct hci_ev_keypress_notify *ev = data;
5427 	struct hci_conn *conn;
5428 
5429 	bt_dev_dbg(hdev, "");
5430 
5431 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5432 	if (!conn)
5433 		return;
5434 
5435 	switch (ev->type) {
5436 	case HCI_KEYPRESS_STARTED:
5437 		conn->passkey_entered = 0;
5438 		return;
5439 
5440 	case HCI_KEYPRESS_ENTERED:
5441 		conn->passkey_entered++;
5442 		break;
5443 
5444 	case HCI_KEYPRESS_ERASED:
5445 		conn->passkey_entered--;
5446 		break;
5447 
5448 	case HCI_KEYPRESS_CLEARED:
5449 		conn->passkey_entered = 0;
5450 		break;
5451 
5452 	case HCI_KEYPRESS_COMPLETED:
5453 		return;
5454 	}
5455 
5456 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5457 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5458 					 conn->dst_type, conn->passkey_notify,
5459 					 conn->passkey_entered);
5460 }
5461 
5462 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5463 					 struct sk_buff *skb)
5464 {
5465 	struct hci_ev_simple_pair_complete *ev = data;
5466 	struct hci_conn *conn;
5467 
5468 	bt_dev_dbg(hdev, "");
5469 
5470 	hci_dev_lock(hdev);
5471 
5472 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5473 	if (!conn || !hci_conn_ssp_enabled(conn))
5474 		goto unlock;
5475 
5476 	/* Reset the authentication requirement to unknown */
5477 	conn->remote_auth = 0xff;
5478 
5479 	/* To avoid duplicate auth_failed events to user space we check
5480 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
5481 	 * initiated the authentication. A traditional auth_complete
5482 	 * event is always produced as the initiator and is also
5483 	 * mapped to the mgmt_auth_failed event. */
5484 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5485 		mgmt_auth_failed(conn, ev->status);
5486 
5487 	hci_conn_drop(conn);
5488 
5489 unlock:
5490 	hci_dev_unlock(hdev);
5491 }
5492 
5493 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5494 					 struct sk_buff *skb)
5495 {
5496 	struct hci_ev_remote_host_features *ev = data;
5497 	struct inquiry_entry *ie;
5498 	struct hci_conn *conn;
5499 
5500 	bt_dev_dbg(hdev, "");
5501 
5502 	hci_dev_lock(hdev);
5503 
5504 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5505 	if (conn)
5506 		memcpy(conn->features[1], ev->features, 8);
5507 
5508 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5509 	if (ie)
5510 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5511 
5512 	hci_dev_unlock(hdev);
5513 }
5514 
5515 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5516 					    struct sk_buff *skb)
5517 {
5518 	struct hci_ev_remote_oob_data_request *ev = edata;
5519 	struct oob_data *data;
5520 
5521 	bt_dev_dbg(hdev, "");
5522 
5523 	hci_dev_lock(hdev);
5524 
5525 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5526 		goto unlock;
5527 
5528 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5529 	if (!data) {
5530 		struct hci_cp_remote_oob_data_neg_reply cp;
5531 
5532 		bacpy(&cp.bdaddr, &ev->bdaddr);
5533 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5534 			     sizeof(cp), &cp);
5535 		goto unlock;
5536 	}
5537 
5538 	if (bredr_sc_enabled(hdev)) {
5539 		struct hci_cp_remote_oob_ext_data_reply cp;
5540 
5541 		bacpy(&cp.bdaddr, &ev->bdaddr);
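		/* In Secure Connections Only mode the legacy P-192 OOB
		 * values must not be used, so send them zeroed out.
		 */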
5542 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5543 			memset(cp.hash192, 0, sizeof(cp.hash192));
5544 			memset(cp.rand192, 0, sizeof(cp.rand192));
5545 		} else {
5546 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5547 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5548 		}
5549 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5550 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5551 
5552 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5553 			     sizeof(cp), &cp);
5554 	} else {
5555 		struct hci_cp_remote_oob_data_reply cp;
5556 
5557 		bacpy(&cp.bdaddr, &ev->bdaddr);
5558 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5559 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5560 
5561 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5562 			     sizeof(cp), &cp);
5563 	}
5564 
5565 unlock:
5566 	hci_dev_unlock(hdev);
5567 }
5568 
5569 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5570 				u8 bdaddr_type, bdaddr_t *local_rpa)
5571 {
5572 	if (conn->out) {
5573 		conn->dst_type = bdaddr_type;
5574 		conn->resp_addr_type = bdaddr_type;
5575 		bacpy(&conn->resp_addr, bdaddr);
5576 
5577 		/* If the controller has set a Local RPA, it must be used
5578 		 * instead of hdev->rpa.
5579 		 */
5580 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5581 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5582 			bacpy(&conn->init_addr, local_rpa);
5583 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5584 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5585 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5586 		} else {
5587 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5588 						  &conn->init_addr_type);
5589 		}
5590 	} else {
5591 		conn->resp_addr_type = conn->hdev->adv_addr_type;
5592 		/* If the controller has set a Local RPA, it must be used
5593 		 * instead of hdev->rpa.
5594 		 */
5595 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5596 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5597 			bacpy(&conn->resp_addr, local_rpa);
5598 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5599 			/* In case of ext adv, resp_addr will be updated in
5600 			 * Adv Terminated event.
5601 			 */
5602 			if (!ext_adv_capable(conn->hdev))
5603 				bacpy(&conn->resp_addr,
5604 				      &conn->hdev->random_addr);
5605 		} else {
5606 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5607 		}
5608 
5609 		conn->init_addr_type = bdaddr_type;
5610 		bacpy(&conn->init_addr, bdaddr);
5611 
5612 		/* For incoming connections, set the default minimum
5613 		 * and maximum connection interval. They will be used
5614 		 * to check if the parameters are in range and if not
5615 		 * trigger the connection update procedure.
5616 		 */
5617 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5618 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5619 	}
5620 }
5621 
5622 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5623 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5624 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5625 				 u16 interval, u16 latency,
5626 				 u16 supervision_timeout)
5627 {
5628 	struct hci_conn_params *params;
5629 	struct hci_conn *conn;
5630 	struct smp_irk *irk;
5631 	u8 addr_type;
5632 
5633 	hci_dev_lock(hdev);
5634 
5635 	/* All controllers implicitly stop advertising in the event of a
5636 	 * connection, so ensure that the state bit is cleared.
5637 	 */
5638 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5639 
5640 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5641 	if (!conn) {
5642 		/* In case of an error status with no connection pending,
5643 		 * just unlock as there is nothing to clean up.
5644 		 */
5645 		if (status)
5646 			goto unlock;
5647 
5648 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5649 		if (IS_ERR(conn)) {
5650 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5651 			goto unlock;
5652 		}
5653 
5654 		conn->dst_type = bdaddr_type;
5655 
5656 		/* If we didn't have a hci_conn object previously
5657 		 * but we're in central role, this must be something
5658 		 * initiated using an accept list. Since accept list based
5659 		 * connections are not "first class citizens" we don't
5660 		 * have full tracking of them. Therefore, we go ahead
5661 		 * with a "best effort" approach of determining the
5662 		 * initiator address based on the HCI_PRIVACY flag.
5663 		 */
5664 		if (conn->out) {
5665 			conn->resp_addr_type = bdaddr_type;
5666 			bacpy(&conn->resp_addr, bdaddr);
5667 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5668 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5669 				bacpy(&conn->init_addr, &hdev->rpa);
5670 			} else {
5671 				hci_copy_identity_address(hdev,
5672 							  &conn->init_addr,
5673 							  &conn->init_addr_type);
5674 			}
5675 		}
5676 	} else {
5677 		cancel_delayed_work(&conn->le_conn_timeout);
5678 	}
5679 
5680 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5681 	 * Processing it more than once per connection can corrupt kernel memory.
5682 	 *
5683 	 * As the connection handle is set here for the first time, it indicates
5684 	 * whether the connection is already set up.
5685 	 */
5686 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5687 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5688 		goto unlock;
5689 	}
5690 
5691 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5692 
5693 	/* Lookup the identity address from the stored connection
5694 	 * address and address type.
5695 	 *
5696 	 * When establishing connections to an identity address, the
5697 	 * connection procedure will store the resolvable random
5698 	 * address first. Now if it can be converted back into the
5699 	 * identity address, start using the identity address from
5700 	 * now on.
5701 	 */
5702 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5703 	if (irk) {
5704 		bacpy(&conn->dst, &irk->bdaddr);
5705 		conn->dst_type = irk->addr_type;
5706 	}
5707 
5708 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5709 
5710 	/* All connection failure handling is taken care of by the
5711 	 * hci_conn_failed function which is triggered by the HCI
5712 	 * request completion callbacks used for connecting.
5713 	 */
5714 	if (status || hci_conn_set_handle(conn, handle))
5715 		goto unlock;
5716 
5717 	/* Drop the connection if it has been aborted */
5718 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5719 		hci_conn_drop(conn);
5720 		goto unlock;
5721 	}
5722 
5723 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5724 		addr_type = BDADDR_LE_PUBLIC;
5725 	else
5726 		addr_type = BDADDR_LE_RANDOM;
5727 
5728 	/* Drop the connection if the device is blocked */
5729 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5730 		hci_conn_drop(conn);
5731 		goto unlock;
5732 	}
5733 
5734 	mgmt_device_connected(hdev, conn, NULL, 0);
5735 
5736 	conn->sec_level = BT_SECURITY_LOW;
5737 	conn->state = BT_CONFIG;
5738 
5739 	/* Store current advertising instance as connection advertising instance
5740 	 * when software rotation is in use so it can be re-enabled when
5741 	 * disconnected.
5742 	 */
5743 	if (!ext_adv_capable(hdev))
5744 		conn->adv_instance = hdev->cur_adv_instance;
5745 
5746 	conn->le_conn_interval = interval;
5747 	conn->le_conn_latency = latency;
5748 	conn->le_supv_timeout = supervision_timeout;
5749 
5750 	hci_debugfs_create_conn(conn);
5751 	hci_conn_add_sysfs(conn);
5752 
5753 	/* The remote features procedure is defined for central
5754 	 * role only. So only in case of an initiated connection
5755 	 * request the remote features.
5756 	 *
5757 	 * If the local controller supports peripheral-initiated features
5758 	 * exchange, then requesting the remote features in peripheral
5759 	 * role is possible. Otherwise just transition into the
5760 	 * connected state without requesting the remote features.
5761 	 */
5762 	if (conn->out ||
5763 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5764 		struct hci_cp_le_read_remote_features cp;
5765 
5766 		cp.handle = __cpu_to_le16(conn->handle);
5767 
5768 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5769 			     sizeof(cp), &cp);
5770 
5771 		hci_conn_hold(conn);
5772 	} else {
5773 		conn->state = BT_CONNECTED;
5774 		hci_connect_cfm(conn, status);
5775 	}
5776 
5777 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5778 					   conn->dst_type);
5779 	if (params) {
5780 		hci_pend_le_list_del_init(params);
5781 		if (params->conn) {
5782 			hci_conn_drop(params->conn);
5783 			hci_conn_put(params->conn);
5784 			params->conn = NULL;
5785 		}
5786 	}
5787 
5788 unlock:
5789 	hci_update_passive_scan(hdev);
5790 	hci_dev_unlock(hdev);
5791 }
5792 
5793 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5794 				     struct sk_buff *skb)
5795 {
5796 	struct hci_ev_le_conn_complete *ev = data;
5797 
5798 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5799 
5800 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5801 			     NULL, ev->role, le16_to_cpu(ev->handle),
5802 			     le16_to_cpu(ev->interval),
5803 			     le16_to_cpu(ev->latency),
5804 			     le16_to_cpu(ev->supervision_timeout));
5805 }
5806 
5807 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5808 					 struct sk_buff *skb)
5809 {
5810 	struct hci_ev_le_enh_conn_complete *ev = data;
5811 
5812 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5813 
5814 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5815 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5816 			     le16_to_cpu(ev->interval),
5817 			     le16_to_cpu(ev->latency),
5818 			     le16_to_cpu(ev->supervision_timeout));
5819 }
5820 
5821 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5822 				    struct sk_buff *skb)
5823 {
5824 	struct hci_evt_le_ext_adv_set_term *ev = data;
5825 	struct hci_conn *conn;
5826 	struct adv_info *adv, *n;
5827 
5828 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5829 
5830 	/* The Bluetooth Core 5.3 specification clearly states that this event
5831 	 * shall not be sent when the Host disables the advertising set. So in
5832 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5833 	 *
5834 	 * When the Host disables an advertising set, all cleanup is done via
5835 	 * its command callback and does not need to be duplicated here.
5836 	 */
5837 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5838 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5839 		return;
5840 	}
5841 
5842 	hci_dev_lock(hdev);
5843 
5844 	adv = hci_find_adv_instance(hdev, ev->handle);
5845 
5846 	if (ev->status) {
5847 		if (!adv)
5848 			goto unlock;
5849 
5850 		/* Remove advertising as it has been terminated */
5851 		hci_remove_adv_instance(hdev, ev->handle);
5852 		mgmt_advertising_removed(NULL, hdev, ev->handle);
5853 
5854 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5855 			if (adv->enabled)
5856 				goto unlock;
5857 		}
5858 
5859 		/* We are no longer advertising, clear HCI_LE_ADV */
5860 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5861 		goto unlock;
5862 	}
5863 
5864 	if (adv)
5865 		adv->enabled = false;
5866 
5867 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5868 	if (conn) {
5869 		/* Store handle in the connection so the correct advertising
5870 		 * instance can be re-enabled when disconnected.
5871 		 */
5872 		conn->adv_instance = ev->handle;
5873 
5874 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5875 		    bacmp(&conn->resp_addr, BDADDR_ANY))
5876 			goto unlock;
5877 
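		/* Advertising handle 0 is the default instance, which
		 * uses the controller-wide random address.
		 */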
5878 		if (!ev->handle) {
5879 			bacpy(&conn->resp_addr, &hdev->random_addr);
5880 			goto unlock;
5881 		}
5882 
5883 		if (adv)
5884 			bacpy(&conn->resp_addr, &adv->random_addr);
5885 	}
5886 
5887 unlock:
5888 	hci_dev_unlock(hdev);
5889 }
5890 
5891 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5892 					    struct sk_buff *skb)
5893 {
5894 	struct hci_ev_le_conn_update_complete *ev = data;
5895 	struct hci_conn *conn;
5896 
5897 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5898 
5899 	if (ev->status)
5900 		return;
5901 
5902 	hci_dev_lock(hdev);
5903 
5904 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5905 	if (conn) {
5906 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5907 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5908 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5909 	}
5910 
5911 	hci_dev_unlock(hdev);
5912 }
5913 
5914 /* This function requires the caller holds hdev->lock */
5915 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5916 					      bdaddr_t *addr,
5917 					      u8 addr_type, bool addr_resolved,
5918 					      u8 adv_type)
5919 {
5920 	struct hci_conn *conn;
5921 	struct hci_conn_params *params;
5922 
5923 	/* If the event is not connectable don't proceed further */
5924 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5925 		return NULL;
5926 
5927 	/* Ignore if the device is blocked or hdev is suspended */
5928 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5929 	    hdev->suspended)
5930 		return NULL;
5931 
5932 	/* Most controllers will fail if we try to create new connections
5933 	 * while we have an existing one in peripheral role.
5934 	 */
5935 	if (hdev->conn_hash.le_num_peripheral > 0 &&
5936 	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5937 	     !(hdev->le_states[3] & 0x10)))
5938 		return NULL;
5939 
5940 	/* If we're not connectable, only connect devices that we have in
5941 	 * our pend_le_conns list.
5942 	 */
5943 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5944 					   addr_type);
5945 	if (!params)
5946 		return NULL;
5947 
5948 	if (!params->explicit_connect) {
5949 		switch (params->auto_connect) {
5950 		case HCI_AUTO_CONN_DIRECT:
5951 			/* Only devices advertising with ADV_DIRECT_IND
5952 			 * trigger a connection attempt. This allows
5953 			 * incoming connections from peripheral devices.
5954 			 */
5955 			if (adv_type != LE_ADV_DIRECT_IND)
5956 				return NULL;
5957 			break;
5958 		case HCI_AUTO_CONN_ALWAYS:
5959 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
5960 			 * trigger a connection attempt. This means that
5961 			 * incoming connections from peripheral devices are
5962 			 * accepted and outgoing connections to peripheral
5963 			 * devices are established when found.
5964 			 */
5965 			break;
5966 		default:
5967 			return NULL;
5968 		}
5969 	}
5970 
5971 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5972 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5973 			      HCI_ROLE_MASTER);
5974 	if (!IS_ERR(conn)) {
5975 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5976 		 * by the higher layer that tried to connect; if not,
5977 		 * store the pointer since we don't really have any
5978 		 * other owner of the object besides the params that
5979 		 * triggered it. This way we can abort the connection if
5980 		 * the parameters get removed and keep the reference
5981 		 * count consistent once the connection is established.
5982 		 */
5983 
5984 		if (!params->explicit_connect)
5985 			params->conn = hci_conn_get(conn);
5986 
5987 		return conn;
5988 	}
5989 
5990 	switch (PTR_ERR(conn)) {
5991 	case -EBUSY:
5992 		/* If hci_connect() returns -EBUSY it means there is already
5993 		 * an LE connection attempt going on. Since controllers don't
5994 		 * support more than one connection attempt at a time, we
5995 		 * don't consider this an error case.
5996 		 */
5997 		break;
5998 	default:
5999 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6000 		return NULL;
6001 	}
6002 
6003 	return NULL;
6004 }
6005 
6006 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6007 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6008 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6009 			       bool ext_adv, bool ctl_time, u64 instant)
6010 {
6011 	struct discovery_state *d = &hdev->discovery;
6012 	struct smp_irk *irk;
6013 	struct hci_conn *conn;
6014 	bool match, bdaddr_resolved;
6015 	u32 flags;
6016 	u8 *ptr;
6017 
6018 	switch (type) {
6019 	case LE_ADV_IND:
6020 	case LE_ADV_DIRECT_IND:
6021 	case LE_ADV_SCAN_IND:
6022 	case LE_ADV_NONCONN_IND:
6023 	case LE_ADV_SCAN_RSP:
6024 		break;
6025 	default:
6026 		bt_dev_err_ratelimited(hdev, "unknown advertising packet type: 0x%02x",
6027 				       type);
6028 		return;
6029 	}
6030 
6031 	if (len > max_adv_len(hdev)) {
6032 		bt_dev_err_ratelimited(hdev,
6033 				       "adv larger than maximum supported");
6034 		return;
6035 	}
6036 
6037 	/* Find the end of the data in case the report contains padded zero
6038 	 * bytes at the end causing an invalid length value.
6039 	 *
6040 	 * When data is NULL, len is 0 so there is no need for extra ptr
6041 	 * check as 'ptr < data + 0' is already false in such case.
6042 	 */
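	/* Each AD structure is a length octet followed by that many
	 * octets of data (AD type + payload).
	 */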
6043 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6044 		if (ptr + 1 + *ptr > data + len)
6045 			break;
6046 	}
6047 
6048 	/* Adjust for actual length. This handles the case when remote
6049 	 * device is advertising with incorrect data length.
6050 	 */
6051 	len = ptr - data;
6052 
6053 	/* If the direct address is present, then this report is from
6054 	 * a LE Direct Advertising Report event. In that case it is
6055 	 * important to see if the address is matching the local
6056 	 * controller address.
6057 	 */
6058 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6059 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6060 						  &bdaddr_resolved);
6061 
6062 		/* Only resolvable random addresses are valid for these
6063 		 * kinds of reports; others can be ignored.
6064 		 */
6065 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6066 			return;
6067 
6068 		/* If the controller is not using resolvable random
6069 		 * addresses, then this report can be ignored.
6070 		 */
6071 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6072 			return;
6073 
6074 		/* If the local IRK of the controller does not match
6075 		 * with the resolvable random address provided, then
6076 		 * this report can be ignored.
6077 		 */
6078 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6079 			return;
6080 	}
6081 
6082 	/* Check if we need to convert to identity address */
6083 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6084 	if (irk) {
6085 		bdaddr = &irk->bdaddr;
6086 		bdaddr_type = irk->addr_type;
6087 	}
6088 
6089 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6090 
6091 	/* Check if we have been requested to connect to this device.
6092 	 *
6093 	 * direct_addr is set only for directed advertising reports (it is NULL
6094 	 * for advertising reports) and is already verified to be RPA above.
6095 	 */
6096 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6097 				     type);
6098 	if (!ext_adv && conn && type == LE_ADV_IND &&
6099 	    len <= max_adv_len(hdev)) {
6100 		/* Store report for later inclusion by
6101 		 * mgmt_device_connected
6102 		 */
6103 		memcpy(conn->le_adv_data, data, len);
6104 		conn->le_adv_data_len = len;
6105 	}
6106 
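	/* ADV_NONCONN_IND and ADV_SCAN_IND do not accept connection
	 * requests, so report such devices as not connectable.
	 */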
6107 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6108 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6109 	else
6110 		flags = 0;
6111 
6112 	/* All scan results should be sent up for Mesh systems */
6113 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6114 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6115 				  rssi, flags, data, len, NULL, 0, instant);
6116 		return;
6117 	}
6118 
6119 	/* Passive scanning shouldn't trigger any device found events,
6120 	 * except for devices marked as CONN_REPORT for which we do send
6121 	 * device found events, or when advertisement monitoring was requested.
6122 	 */
6123 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6124 		if (type == LE_ADV_DIRECT_IND)
6125 			return;
6126 
6127 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6128 					       bdaddr, bdaddr_type) &&
6129 		    idr_is_empty(&hdev->adv_monitors_idr))
6130 			return;
6131 
6132 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6133 				  rssi, flags, data, len, NULL, 0, 0);
6134 		return;
6135 	}
6136 
6137 	/* When receiving a scan response, there is no way to know
6138 	 * if the remote device is connectable or not. However,
6139 	 * since scan responses are merged with a previously seen
6140 	 * advertising report, the flags field from that report
6141 	 * will be used.
6142 	 *
6143 	 * In the unlikely case that a controller just sends a scan
6144 	 * response event that doesn't match the pending report, then
6145 	 * it is marked as a standalone SCAN_RSP.
6146 	 */
6147 	if (type == LE_ADV_SCAN_RSP)
6148 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6149 
6150 	/* If there's nothing pending either store the data from this
6151 	 * event or send an immediate device found event if the data
6152 	 * should not be stored for later.
6153 	 */
6154 	if (!ext_adv && !has_pending_adv_report(hdev)) {
6155 		/* If the report will trigger a SCAN_REQ store it for
6156 		 * later merging.
6157 		 */
6158 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6159 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6160 						 rssi, flags, data, len);
6161 			return;
6162 		}
6163 
6164 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6165 				  rssi, flags, data, len, NULL, 0, 0);
6166 		return;
6167 	}
6168 
6169 	/* Check if the pending report is for the same device as the new one */
6170 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6171 		 bdaddr_type == d->last_adv_addr_type);
6172 
6173 	/* If the pending data doesn't match this report or this isn't a
6174 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6175 	 * sending of the pending data.
6176 	 */
6177 	if (type != LE_ADV_SCAN_RSP || !match) {
6178 		/* Send out whatever is in the cache, but skip duplicates */
6179 		if (!match)
6180 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6181 					  d->last_adv_addr_type, NULL,
6182 					  d->last_adv_rssi, d->last_adv_flags,
6183 					  d->last_adv_data,
6184 					  d->last_adv_data_len, NULL, 0, 0);
6185 
6186 		/* If the new report will trigger a SCAN_REQ store it for
6187 		 * later merging.
6188 		 */
6189 		if (!ext_adv && (type == LE_ADV_IND ||
6190 				 type == LE_ADV_SCAN_IND)) {
6191 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6192 						 rssi, flags, data, len);
6193 			return;
6194 		}
6195 
6196 		/* The advertising reports cannot be merged, so clear
6197 		 * the pending report and send out a device found event.
6198 		 */
6199 		clear_pending_adv_report(hdev);
6200 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6201 				  rssi, flags, data, len, NULL, 0, 0);
6202 		return;
6203 	}
6204 
6205 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6206 	 * the new event is a SCAN_RSP. We can therefore proceed with
6207 	 * sending a merged device found event.
6208 	 */
6209 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6210 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6211 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6212 	clear_pending_adv_report(hdev);
6213 }
6214 
6215 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6216 				  struct sk_buff *skb)
6217 {
6218 	struct hci_ev_le_advertising_report *ev = data;
6219 	u64 instant = jiffies;
6220 
6221 	if (!ev->num)
6222 		return;
6223 
6224 	hci_dev_lock(hdev);
6225 
6226 	while (ev->num--) {
6227 		struct hci_ev_le_advertising_info *info;
6228 		s8 rssi;
6229 
6230 		info = hci_le_ev_skb_pull(hdev, skb,
6231 					  HCI_EV_LE_ADVERTISING_REPORT,
6232 					  sizeof(*info));
6233 		if (!info)
6234 			break;
6235 
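		/* The advertising data is followed by a single RSSI
		 * octet, hence length + 1 bytes are pulled below.
		 */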
6236 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6237 					info->length + 1))
6238 			break;
6239 
6240 		if (info->length <= max_adv_len(hdev)) {
6241 			rssi = info->data[info->length];
6242 			process_adv_report(hdev, info->type, &info->bdaddr,
6243 					   info->bdaddr_type, NULL, 0, rssi,
6244 					   info->data, info->length, false,
6245 					   false, instant);
6246 		} else {
6247 			bt_dev_err(hdev, "Dropping invalid advertising data");
6248 		}
6249 	}
6250 
6251 	hci_dev_unlock(hdev);
6252 }
6253 
6254 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6255 {
6256 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6257 		switch (evt_type) {
6258 		case LE_LEGACY_ADV_IND:
6259 			return LE_ADV_IND;
6260 		case LE_LEGACY_ADV_DIRECT_IND:
6261 			return LE_ADV_DIRECT_IND;
6262 		case LE_LEGACY_ADV_SCAN_IND:
6263 			return LE_ADV_SCAN_IND;
6264 		case LE_LEGACY_NONCONN_IND:
6265 			return LE_ADV_NONCONN_IND;
6266 		case LE_LEGACY_SCAN_RSP_ADV:
6267 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6268 			return LE_ADV_SCAN_RSP;
6269 		}
6270 
6271 		goto invalid;
6272 	}
6273 
6274 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6275 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6276 			return LE_ADV_DIRECT_IND;
6277 
6278 		return LE_ADV_IND;
6279 	}
6280 
6281 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6282 		return LE_ADV_SCAN_RSP;
6283 
6284 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6285 		return LE_ADV_SCAN_IND;
6286 
6287 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6288 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6289 		return LE_ADV_NONCONN_IND;
6290 
6291 invalid:
6292 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6293 			       evt_type);
6294 
6295 	return LE_ADV_INVALID;
6296 }
6297 
6298 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6299 				      struct sk_buff *skb)
6300 {
6301 	struct hci_ev_le_ext_adv_report *ev = data;
6302 	u64 instant = jiffies;
6303 
6304 	if (!ev->num)
6305 		return;
6306 
6307 	hci_dev_lock(hdev);
6308 
6309 	while (ev->num--) {
6310 		struct hci_ev_le_ext_adv_info *info;
6311 		u8 legacy_evt_type;
6312 		u16 evt_type;
6313 
6314 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6315 					  sizeof(*info));
6316 		if (!info)
6317 			break;
6318 
6319 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6320 					info->length))
6321 			break;
6322 
6323 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6324 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6325 		if (legacy_evt_type != LE_ADV_INVALID) {
6326 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6327 					   info->bdaddr_type, NULL, 0,
6328 					   info->rssi, info->data, info->length,
6329 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6330 					   false, instant);
6331 		}
6332 	}
6333 
6334 	hci_dev_unlock(hdev);
6335 }
6336 
6337 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6338 {
6339 	struct hci_cp_le_pa_term_sync cp;
6340 
6341 	memset(&cp, 0, sizeof(cp));
6342 	cp.handle = handle;
6343 
6344 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6345 }
6346 
6347 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6348 					    struct sk_buff *skb)
6349 {
6350 	struct hci_ev_le_pa_sync_established *ev = data;
6351 	int mask = hdev->link_mode;
6352 	__u8 flags = 0;
6353 	struct hci_conn *pa_sync;
6354 
6355 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6356 
6357 	hci_dev_lock(hdev);
6358 
6359 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6360 
6361 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6362 	if (!(mask & HCI_LM_ACCEPT)) {
6363 		hci_le_pa_term_sync(hdev, ev->handle);
6364 		goto unlock;
6365 	}
6366 
6367 	if (!(flags & HCI_PROTO_DEFER))
6368 		goto unlock;
6369 
6370 	if (ev->status) {
6371 		/* Add connection to indicate the failed PA sync event */
6372 		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6373 					     HCI_ROLE_SLAVE);
6374 
6375 		if (IS_ERR(pa_sync))
6376 			goto unlock;
6377 
6378 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6379 
6380 		/* Notify iso layer */
6381 		hci_connect_cfm(pa_sync, ev->status);
6382 	}
6383 
6384 unlock:
6385 	hci_dev_unlock(hdev);
6386 }
6387 
6388 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6389 				      struct sk_buff *skb)
6390 {
6391 	struct hci_ev_le_per_adv_report *ev = data;
6392 	int mask = hdev->link_mode;
6393 	__u8 flags = 0;
6394 
6395 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6396 
6397 	hci_dev_lock(hdev);
6398 
6399 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6400 	if (!(mask & HCI_LM_ACCEPT))
6401 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6402 
6403 	hci_dev_unlock(hdev);
6404 }
6405 
6406 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6407 					    struct sk_buff *skb)
6408 {
6409 	struct hci_ev_le_remote_feat_complete *ev = data;
6410 	struct hci_conn *conn;
6411 
6412 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6413 
6414 	hci_dev_lock(hdev);
6415 
6416 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6417 	if (conn) {
6418 		if (!ev->status)
6419 			memcpy(conn->features[0], ev->features, 8);
6420 
6421 		if (conn->state == BT_CONFIG) {
6422 			__u8 status;
6423 
6424 			/* If the local controller supports peripheral-initiated
6425 			 * features exchange, but the remote controller does
6426 			 * not, then it is possible that the error code 0x1a
6427 			 * for unsupported remote feature gets returned.
6428 			 *
6429 			 * In this specific case, allow the connection to
6430 			 * transition into connected state and mark it as
6431 			 * successful.
6432 			 */
6433 			if (!conn->out && ev->status == 0x1a &&
6434 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6435 				status = 0x00;
6436 			else
6437 				status = ev->status;
6438 
6439 			conn->state = BT_CONNECTED;
6440 			hci_connect_cfm(conn, status);
6441 			hci_conn_drop(conn);
6442 		}
6443 	}
6444 
6445 	hci_dev_unlock(hdev);
6446 }
6447 
6448 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6449 				   struct sk_buff *skb)
6450 {
6451 	struct hci_ev_le_ltk_req *ev = data;
6452 	struct hci_cp_le_ltk_reply cp;
6453 	struct hci_cp_le_ltk_neg_reply neg;
6454 	struct hci_conn *conn;
6455 	struct smp_ltk *ltk;
6456 
6457 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6458 
6459 	hci_dev_lock(hdev);
6460 
6461 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6462 	if (!conn)
6463 		goto not_found;
6464 
6465 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6466 	if (!ltk)
6467 		goto not_found;
6468 
6469 	if (smp_ltk_is_sc(ltk)) {
6470 		/* With SC both EDiv and Rand are set to zero */
6471 		if (ev->ediv || ev->rand)
6472 			goto not_found;
6473 	} else {
6474 		/* For non-SC keys check that EDiv and Rand match */
6475 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6476 			goto not_found;
6477 	}
6478 
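	/* Only enc_size bytes of the key are significant; pad the rest
	 * of the 16-byte LTK buffer with zeroes.
	 */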
6479 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6480 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6481 	cp.handle = cpu_to_le16(conn->handle);
6482 
6483 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6484 
6485 	conn->enc_key_size = ltk->enc_size;
6486 
6487 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6488 
6489 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6490 	 * temporary key used to encrypt a connection following
6491 	 * pairing. It is used during the Encrypted Session Setup to
6492 	 * distribute the keys. Later, security can be re-established
6493 	 * using a distributed LTK.
6494 	 */
6495 	if (ltk->type == SMP_STK) {
6496 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6497 		list_del_rcu(&ltk->list);
6498 		kfree_rcu(ltk, rcu);
6499 	} else {
6500 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6501 	}
6502 
6503 	hci_dev_unlock(hdev);
6504 
6505 	return;
6506 
6507 not_found:
6508 	neg.handle = ev->handle;
6509 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6510 	hci_dev_unlock(hdev);
6511 }
6512 
6513 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6514 				      u8 reason)
6515 {
6516 	struct hci_cp_le_conn_param_req_neg_reply cp;
6517 
6518 	cp.handle = cpu_to_le16(handle);
6519 	cp.reason = reason;
6520 
6521 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6522 		     &cp);
6523 }
6524 
6525 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6526 					     struct sk_buff *skb)
6527 {
6528 	struct hci_ev_le_remote_conn_param_req *ev = data;
6529 	struct hci_cp_le_conn_param_req_reply cp;
6530 	struct hci_conn *hcon;
6531 	u16 handle, min, max, latency, timeout;
6532 
6533 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6534 
6535 	handle = le16_to_cpu(ev->handle);
6536 	min = le16_to_cpu(ev->interval_min);
6537 	max = le16_to_cpu(ev->interval_max);
6538 	latency = le16_to_cpu(ev->latency);
6539 	timeout = le16_to_cpu(ev->timeout);
6540 
6541 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6542 	if (!hcon || hcon->state != BT_CONNECTED)
6543 		return send_conn_param_neg_reply(hdev, handle,
6544 						 HCI_ERROR_UNKNOWN_CONN_ID);
6545 
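	/* Reject requests for a connection interval above the maximum
	 * we are willing to accept.
	 */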
6546 	if (max > hcon->le_conn_max_interval)
6547 		return send_conn_param_neg_reply(hdev, handle,
6548 						 HCI_ERROR_INVALID_LL_PARAMS);
6549 
6550 	if (hci_check_conn_params(min, max, latency, timeout))
6551 		return send_conn_param_neg_reply(hdev, handle,
6552 						 HCI_ERROR_INVALID_LL_PARAMS);
6553 
6554 	if (hcon->role == HCI_ROLE_MASTER) {
6555 		struct hci_conn_params *params;
6556 		u8 store_hint;
6557 
6558 		hci_dev_lock(hdev);
6559 
6560 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6561 						hcon->dst_type);
6562 		if (params) {
6563 			params->conn_min_interval = min;
6564 			params->conn_max_interval = max;
6565 			params->conn_latency = latency;
6566 			params->supervision_timeout = timeout;
6567 			store_hint = 0x01;
6568 		} else {
6569 			store_hint = 0x00;
6570 		}
6571 
6572 		hci_dev_unlock(hdev);
6573 
6574 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6575 				    store_hint, min, max, latency, timeout);
6576 	}
6577 
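	/* Accept the parameters as requested; the CE length fields are
	 * left at zero.
	 */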
6578 	cp.handle = ev->handle;
6579 	cp.interval_min = ev->interval_min;
6580 	cp.interval_max = ev->interval_max;
6581 	cp.latency = ev->latency;
6582 	cp.timeout = ev->timeout;
6583 	cp.min_ce_len = 0;
6584 	cp.max_ce_len = 0;
6585 
6586 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6587 }
6588 
6589 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6590 					 struct sk_buff *skb)
6591 {
6592 	struct hci_ev_le_direct_adv_report *ev = data;
6593 	u64 instant = jiffies;
6594 	int i;
6595 
6596 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6597 				flex_array_size(ev, info, ev->num)))
6598 		return;
6599 
6600 	if (!ev->num)
6601 		return;
6602 
6603 	hci_dev_lock(hdev);
6604 
6605 	for (i = 0; i < ev->num; i++) {
6606 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6607 
6608 		process_adv_report(hdev, info->type, &info->bdaddr,
6609 				   info->bdaddr_type, &info->direct_addr,
6610 				   info->direct_addr_type, info->rssi, NULL, 0,
6611 				   false, false, instant);
6612 	}
6613 
6614 	hci_dev_unlock(hdev);
6615 }
6616 
6617 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6618 				  struct sk_buff *skb)
6619 {
6620 	struct hci_ev_le_phy_update_complete *ev = data;
6621 	struct hci_conn *conn;
6622 
6623 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6624 
6625 	if (ev->status)
6626 		return;
6627 
6628 	hci_dev_lock(hdev);
6629 
6630 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6631 	if (!conn)
6632 		goto unlock;
6633 
6634 	conn->le_tx_phy = ev->tx_phy;
6635 	conn->le_rx_phy = ev->rx_phy;
6636 
6637 unlock:
6638 	hci_dev_unlock(hdev);
6639 }
6640 
6641 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6642 					struct sk_buff *skb)
6643 {
6644 	struct hci_evt_le_cis_established *ev = data;
6645 	struct hci_conn *conn;
6646 	struct bt_iso_qos *qos;
6647 	bool pending = false;
6648 	u16 handle = __le16_to_cpu(ev->handle);
6649 
6650 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6651 
6652 	hci_dev_lock(hdev);
6653 
6654 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6655 	if (!conn) {
6656 		bt_dev_err(hdev,
6657 			   "Unable to find connection with handle 0x%4.4x",
6658 			   handle);
6659 		goto unlock;
6660 	}
6661 
6662 	if (conn->type != ISO_LINK) {
6663 		bt_dev_err(hdev,
6664 			   "Invalid connection link type handle 0x%4.4x",
6665 			   handle);
6666 		goto unlock;
6667 	}
6668 
6669 	qos = &conn->iso_qos;
6670 
6671 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6672 
6673 	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6674 	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6675 	qos->ucast.out.interval = qos->ucast.in.interval;
6676 
6677 	switch (conn->role) {
6678 	case HCI_ROLE_SLAVE:
6679 		/* Convert Transport Latency (us) to Latency (msec) */
6680 		qos->ucast.in.latency =
6681 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6682 					  1000);
6683 		qos->ucast.out.latency =
6684 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6685 					  1000);
6686 		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6687 		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6688 		qos->ucast.in.phy = ev->c_phy;
6689 		qos->ucast.out.phy = ev->p_phy;
6690 		break;
6691 	case HCI_ROLE_MASTER:
6692 		/* Convert Transport Latency (us) to Latency (msec) */
6693 		qos->ucast.out.latency =
6694 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6695 					  1000);
6696 		qos->ucast.in.latency =
6697 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6698 					  1000);
6699 		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6700 		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6701 		qos->ucast.out.phy = ev->c_phy;
6702 		qos->ucast.in.phy = ev->p_phy;
6703 		break;
6704 	}
6705 
6706 	if (!ev->status) {
6707 		conn->state = BT_CONNECTED;
6708 		hci_debugfs_create_conn(conn);
6709 		hci_conn_add_sysfs(conn);
6710 		hci_iso_setup_path(conn);
6711 		goto unlock;
6712 	}
6713 
6714 	conn->state = BT_CLOSED;
6715 	hci_connect_cfm(conn, ev->status);
6716 	hci_conn_del(conn);
6717 
6718 unlock:
6719 	if (pending)
6720 		hci_le_create_cis_pending(hdev);
6721 
6722 	hci_dev_unlock(hdev);
6723 }
6724 
6725 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6726 {
6727 	struct hci_cp_le_reject_cis cp;
6728 
6729 	memset(&cp, 0, sizeof(cp));
6730 	cp.handle = handle;
6731 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6732 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6733 }
6734 
6735 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6736 {
6737 	struct hci_cp_le_accept_cis cp;
6738 
6739 	memset(&cp, 0, sizeof(cp));
6740 	cp.handle = handle;
6741 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6742 }
6743 
6744 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6745 			       struct sk_buff *skb)
6746 {
6747 	struct hci_evt_le_cis_req *ev = data;
6748 	u16 acl_handle, cis_handle;
6749 	struct hci_conn *acl, *cis;
6750 	int mask;
6751 	__u8 flags = 0;
6752 
6753 	acl_handle = __le16_to_cpu(ev->acl_handle);
6754 	cis_handle = __le16_to_cpu(ev->cis_handle);
6755 
6756 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6757 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6758 
6759 	hci_dev_lock(hdev);
6760 
6761 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6762 	if (!acl)
6763 		goto unlock;
6764 
6765 	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6766 	if (!(mask & HCI_LM_ACCEPT)) {
6767 		hci_le_reject_cis(hdev, ev->cis_handle);
6768 		goto unlock;
6769 	}
6770 
6771 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6772 	if (!cis) {
6773 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6774 				   cis_handle);
6775 		if (IS_ERR(cis)) {
6776 			hci_le_reject_cis(hdev, ev->cis_handle);
6777 			goto unlock;
6778 		}
6779 	}
6780 
6781 	cis->iso_qos.ucast.cig = ev->cig_id;
6782 	cis->iso_qos.ucast.cis = ev->cis_id;
6783 
6784 	if (!(flags & HCI_PROTO_DEFER)) {
6785 		hci_le_accept_cis(hdev, ev->cis_handle);
6786 	} else {
6787 		cis->state = BT_CONNECT2;
6788 		hci_connect_cfm(cis, 0);
6789 	}
6790 
6791 unlock:
6792 	hci_dev_unlock(hdev);
6793 }
6794 
6795 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6796 {
6797 	u8 handle = PTR_UINT(data);
6798 
6799 	return hci_le_terminate_big_sync(hdev, handle,
6800 					 HCI_ERROR_LOCAL_HOST_TERM);
6801 }
6802 
6803 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6804 					   struct sk_buff *skb)
6805 {
6806 	struct hci_evt_le_create_big_complete *ev = data;
6807 	struct hci_conn *conn;
6808 	__u8 i = 0;
6809 
6810 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6811 
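	/* Ensure the event payload actually carries num_bis connection
	 * handles before iterating over them.
	 */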
6812 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6813 				flex_array_size(ev, bis_handle, ev->num_bis)))
6814 		return;
6815 
6816 	hci_dev_lock(hdev);
6817 	rcu_read_lock();
6818 
6819 	/* Connect all BISes that are bound to the BIG */
6820 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6821 		if (bacmp(&conn->dst, BDADDR_ANY) ||
6822 		    conn->type != ISO_LINK ||
6823 		    conn->iso_qos.bcast.big != ev->handle)
6824 			continue;
6825 
6826 		if (hci_conn_set_handle(conn,
6827 					__le16_to_cpu(ev->bis_handle[i++])))
6828 			continue;
6829 
6830 		if (!ev->status) {
6831 			conn->state = BT_CONNECTED;
6832 			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6833 			rcu_read_unlock();
6834 			hci_debugfs_create_conn(conn);
6835 			hci_conn_add_sysfs(conn);
6836 			hci_iso_setup_path(conn);
6837 			rcu_read_lock();
6838 			continue;
6839 		}
6840 
6841 		hci_connect_cfm(conn, ev->status);
6842 		rcu_read_unlock();
6843 		hci_conn_del(conn);
6844 		rcu_read_lock();
6845 	}
6846 
6847 	rcu_read_unlock();
6848 
6849 	if (!ev->status && !i)
6850 		/* If no BISes have been connected for the BIG,
6851 		 * terminate. This is in case all bound connections
6852 		 * have been closed before the BIG creation
6853 		 * has completed.
6854 		 */
6855 		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6856 				   UINT_PTR(ev->handle), NULL);
6857 
6858 	hci_dev_unlock(hdev);
6859 }
6860 
6861 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6862 					    struct sk_buff *skb)
6863 {
6864 	struct hci_evt_le_big_sync_estabilished *ev = data;
6865 	struct hci_conn *bis;
6866 	struct hci_conn *pa_sync;
6867 	int i;
6868 
6869 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6870 
6871 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6872 				flex_array_size(ev, bis, ev->num_bis)))
6873 		return;
6874 
6875 	hci_dev_lock(hdev);
6876 
6877 	if (!ev->status) {
6878 		pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
6879 		if (pa_sync)
6880 			/* Also mark the BIG sync established event on the
6881 			 * associated PA sync hcon
6882 			 */
6883 			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
6884 	}
6885 
6886 	for (i = 0; i < ev->num_bis; i++) {
6887 		u16 handle = le16_to_cpu(ev->bis[i]);
6888 		__le32 interval;
6889 
6890 		bis = hci_conn_hash_lookup_handle(hdev, handle);
6891 		if (!bis) {
6892 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6893 					   HCI_ROLE_SLAVE, handle);
6894 			if (IS_ERR(bis))
6895 				continue;
6896 		}
6897 
6898 		if (ev->status != 0x42)
6899 			/* Mark PA sync as established */
6900 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6901 
6902 		bis->iso_qos.bcast.big = ev->handle;
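		/* ev->latency is smaller than 4 bytes, so copy it into a
		 * zero-initialized __le32 before converting.
		 */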
6903 		memset(&interval, 0, sizeof(interval));
6904 		memcpy(&interval, ev->latency, sizeof(ev->latency));
6905 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6906 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6907 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
6908 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6909 
6910 		if (!ev->status) {
6911 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
6912 			hci_iso_setup_path(bis);
6913 		}
6914 	}
6915 
6916 	/* In case BIG sync failed, notify each failed connection to
6917 	 * the user after all hci connections have been added
6918 	 */
6919 	if (ev->status)
6920 		for (i = 0; i < ev->num_bis; i++) {
6921 			u16 handle = le16_to_cpu(ev->bis[i]);
6922 
6923 			bis = hci_conn_hash_lookup_handle(hdev, handle);
6924 			if (!bis)
6925 				continue;
6926 
6927 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
6928 			hci_connect_cfm(bis, ev->status);
6929 		}
6930 
6931 	hci_dev_unlock(hdev);
6932 }
6933 
6934 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
6935 					   struct sk_buff *skb)
6936 {
6937 	struct hci_evt_le_big_info_adv_report *ev = data;
6938 	int mask = hdev->link_mode;
6939 	__u8 flags = 0;
6940 	struct hci_conn *pa_sync;
6941 
6942 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6943 
6944 	hci_dev_lock(hdev);
6945 
6946 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6947 	if (!(mask & HCI_LM_ACCEPT)) {
6948 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6949 		goto unlock;
6950 	}
6951 
6952 	if (!(flags & HCI_PROTO_DEFER))
6953 		goto unlock;
6954 
6955 	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
6956 			le16_to_cpu(ev->sync_handle));
6958 
6959 	if (pa_sync)
6960 		goto unlock;
6961 
6962 	/* Add connection to indicate the PA sync event */
6963 	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6964 				     HCI_ROLE_SLAVE);
6965 
6966 	if (IS_ERR(pa_sync))
6967 		goto unlock;
6968 
6969 	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
6970 	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
6971 
6972 	/* Notify iso layer */
6973 	hci_connect_cfm(pa_sync, 0x00);
6974 
6975 	/* Notify MGMT layer */
6976 	mgmt_device_connected(hdev, pa_sync, NULL, 0);
6977 
6978 unlock:
6979 	hci_dev_unlock(hdev);
6980 }
6981 
6982 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
6983 [_op] = { \
6984 	.func = _func, \
6985 	.min_len = _min_len, \
6986 	.max_len = _max_len, \
6987 }
6988 
6989 #define HCI_LE_EV(_op, _func, _len) \
6990 	HCI_LE_EV_VL(_op, _func, _len, _len)
6991 
6992 #define HCI_LE_EV_STATUS(_op, _func) \
6993 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
6994 
6995 /* Entries in this table shall have their position according to the subevent
6996  * opcode they handle, so use of the macros above is recommended since they
6997  * initialize each entry at its proper index using designated initializers;
6998  * that way events without a callback function can be omitted.
6999  */
7000 static const struct hci_le_ev {
7001 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7002 	u16  min_len;
7003 	u16  max_len;
7004 } hci_le_ev_table[U8_MAX + 1] = {
7005 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7006 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7007 		  sizeof(struct hci_ev_le_conn_complete)),
7008 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7009 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7010 		     sizeof(struct hci_ev_le_advertising_report),
7011 		     HCI_MAX_EVENT_SIZE),
7012 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7013 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7014 		  hci_le_conn_update_complete_evt,
7015 		  sizeof(struct hci_ev_le_conn_update_complete)),
7016 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7017 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7018 		  hci_le_remote_feat_complete_evt,
7019 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7020 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7021 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7022 		  sizeof(struct hci_ev_le_ltk_req)),
7023 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7024 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7025 		  hci_le_remote_conn_param_req_evt,
7026 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7027 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7028 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7029 		  hci_le_enh_conn_complete_evt,
7030 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7031 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7032 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7033 		     sizeof(struct hci_ev_le_direct_adv_report),
7034 		     HCI_MAX_EVENT_SIZE),
7035 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7036 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7037 		  sizeof(struct hci_ev_le_phy_update_complete)),
7038 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7039 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7040 		     sizeof(struct hci_ev_le_ext_adv_report),
7041 		     HCI_MAX_EVENT_SIZE),
7042 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7043 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7044 		  hci_le_pa_sync_estabilished_evt,
7045 		  sizeof(struct hci_ev_le_pa_sync_established)),
7046 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7047 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, hci_le_per_adv_report_evt,
7048 		     sizeof(struct hci_ev_le_per_adv_report),
7049 		     HCI_MAX_EVENT_SIZE),
7051 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7052 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7053 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7054 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7055 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7056 		  sizeof(struct hci_evt_le_cis_established)),
7057 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7058 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7059 		  sizeof(struct hci_evt_le_cis_req)),
7060 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7061 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7062 		     hci_le_create_big_complete_evt,
7063 		     sizeof(struct hci_evt_le_create_big_complete),
7064 		     HCI_MAX_EVENT_SIZE),
7065 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7066 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7067 		     hci_le_big_sync_established_evt,
7068 		     sizeof(struct hci_evt_le_big_sync_estabilished),
7069 		     HCI_MAX_EVENT_SIZE),
7070 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7071 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7072 		     hci_le_big_info_adv_report_evt,
7073 		     sizeof(struct hci_evt_le_big_info_adv_report),
7074 		     HCI_MAX_EVENT_SIZE),
7075 };
7076 
7077 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7078 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7079 			    hci_req_complete_t *req_complete,
7080 			    hci_req_complete_skb_t *req_complete_skb)
7081 {
7082 	struct hci_ev_le_meta *ev = data;
7083 	const struct hci_le_ev *subev;
7084 
7085 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7086 
7087 	/* Only match event if command OGF is for LE */
7088 	if (hdev->req_skb &&
7089 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7090 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7091 		*opcode = hci_skb_opcode(hdev->req_skb);
7092 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7093 				     req_complete_skb);
7094 	}
7095 
7096 	subev = &hci_le_ev_table[ev->subevent];
7097 	if (!subev->func)
7098 		return;
7099 
7100 	if (skb->len < subev->min_len) {
7101 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7102 			   ev->subevent, skb->len, subev->min_len);
7103 		return;
7104 	}
7105 
7106 	/* Just warn if the length is over max_len, since it may still be
7107 	 * possible to partially parse the event, so leave it to the
7108 	 * callback to decide whether that is acceptable.
7109 	 */
7110 	if (skb->len > subev->max_len)
7111 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7112 			    ev->subevent, skb->len, subev->max_len);
7113 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7114 	if (!data)
7115 		return;
7116 
7117 	subev->func(hdev, data, skb);
7118 }
7119 
7120 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7121 				 u8 event, struct sk_buff *skb)
7122 {
7123 	struct hci_ev_cmd_complete *ev;
7124 	struct hci_event_hdr *hdr;
7125 
7126 	if (!skb)
7127 		return false;
7128 
7129 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7130 	if (!hdr)
7131 		return false;
7132 
7133 	if (event) {
7134 		if (hdr->evt != event)
7135 			return false;
7136 		return true;
7137 	}
7138 
7139 	/* Check if request ended in Command Status - no way to retrieve
7140 	 * any extra parameters in this case.
7141 	 */
7142 	if (hdr->evt == HCI_EV_CMD_STATUS)
7143 		return false;
7144 
7145 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7146 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7147 			   hdr->evt);
7148 		return false;
7149 	}
7150 
7151 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7152 	if (!ev)
7153 		return false;
7154 
7155 	if (opcode != __le16_to_cpu(ev->opcode)) {
7156 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7157 		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
7158 		return false;
7159 	}
7160 
7161 	return true;
7162 }
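
/* Wire layouts being distinguished above (illustrative; only Command
 * Complete carries return parameters):
 *
 *	Command Status:   evt 0x0f | status | num_hci_cmd | opcode
 *	Command Complete: evt 0x0e | num_hci_cmd | opcode | return params...
 */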
7163 
7164 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7165 				  struct sk_buff *skb)
7166 {
7167 	struct hci_ev_le_advertising_info *adv;
7168 	struct hci_ev_le_direct_adv_info *direct_adv;
7169 	struct hci_ev_le_ext_adv_info *ext_adv;
7170 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7171 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7172 
7173 	hci_dev_lock(hdev);
7174 
7175 	/* If we are currently suspended and this is the first BT event seen,
7176 	 * save the wake reason associated with the event.
7177 	 */
7178 	if (!hdev->suspended || hdev->wake_reason)
7179 		goto unlock;
7180 
7181 	/* Default to remote wake. Values for wake_reason are documented in the
7182 	 * BlueZ mgmt API docs.
7183 	 */
7184 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7185 
7186 	/* Once configured for remote wakeup, we should only wake up for
7187 	 * reconnections. It's useful to see which device is waking us up so
7188 	 * keep track of the bdaddr of the connection event that woke us up.
7189 	 */
7190 	if (event == HCI_EV_CONN_REQUEST) {
7191 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7192 		hdev->wake_addr_type = BDADDR_BREDR;
7193 	} else if (event == HCI_EV_CONN_COMPLETE) {
7194 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7195 		hdev->wake_addr_type = BDADDR_BREDR;
7196 	} else if (event == HCI_EV_LE_META) {
7197 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7198 		u8 subevent = le_ev->subevent;
7199 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7200 		u8 num_reports = *ptr;
7201 
7202 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7203 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7204 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7205 		    num_reports) {
7206 			adv = (void *)(ptr + 1);
7207 			direct_adv = (void *)(ptr + 1);
7208 			ext_adv = (void *)(ptr + 1);
7209 
7210 			switch (subevent) {
7211 			case HCI_EV_LE_ADVERTISING_REPORT:
7212 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7213 				hdev->wake_addr_type = adv->bdaddr_type;
7214 				break;
7215 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7216 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7217 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7218 				break;
7219 			case HCI_EV_LE_EXT_ADV_REPORT:
7220 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7221 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7222 				break;
7223 			}
7224 		}
7225 	} else {
7226 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7227 	}
7228 
7229 unlock:
7230 	hci_dev_unlock(hdev);
7231 }
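
/* Illustrative sketch of the buffer layout the LE meta branch above walks by
 * hand (after the event header has already been pulled):
 *
 *	u8 subevent;     // struct hci_ev_le_meta
 *	u8 num_reports;  // *ptr
 *	u8 reports[];    // ptr + 1, cast to the per-subevent report struct
 *
 * Only the bdaddr/bdaddr_type of the first report are read, which is why a
 * single cast per subevent type is enough.
 */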
7232 
7233 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7234 [_op] = { \
7235 	.req = false, \
7236 	.func = _func, \
7237 	.min_len = _min_len, \
7238 	.max_len = _max_len, \
7239 }
7240 
7241 #define HCI_EV(_op, _func, _len) \
7242 	HCI_EV_VL(_op, _func, _len, _len)
7243 
7244 #define HCI_EV_STATUS(_op, _func) \
7245 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7246 
7247 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7248 [_op] = { \
7249 	.req = true, \
7250 	.func_req = _func, \
7251 	.min_len = _min_len, \
7252 	.max_len = _max_len, \
7253 }
7254 
7255 #define HCI_EV_REQ(_op, _func, _len) \
7256 	HCI_EV_REQ_VL(_op, _func, _len, _len)
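
/* Worked example (for reference, not in the original source): the .req flag
 * selects which member of the anonymous union below is valid, e.g.
 *
 *	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
 *		   sizeof(struct hci_ev_cmd_status))
 *
 * expands to
 *
 *	[HCI_EV_CMD_STATUS] = {
 *		.req = true,
 *		.func_req = hci_cmd_status_evt,
 *		.min_len = sizeof(struct hci_ev_cmd_status),
 *		.max_len = sizeof(struct hci_ev_cmd_status),
 *	},
 *
 * and hci_event_func() dispatches through .func_req only when .req is set.
 */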
7257 
7258 /* Entries in this table shall be positioned according to the event opcode
7259  * they handle, so use of the macros above is recommended: they initialize
7260  * each entry at its proper index using designated initializers, and
7261  * events without a callback function need no entry at all.
7262  */
7263 static const struct hci_ev {
7264 	bool req;
7265 	union {
7266 		void (*func)(struct hci_dev *hdev, void *data,
7267 			     struct sk_buff *skb);
7268 		void (*func_req)(struct hci_dev *hdev, void *data,
7269 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7270 				 hci_req_complete_t *req_complete,
7271 				 hci_req_complete_skb_t *req_complete_skb);
7272 	};
7273 	u16  min_len;
7274 	u16  max_len;
7275 } hci_ev_table[U8_MAX + 1] = {
7276 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7277 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7278 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7279 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7280 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7281 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7282 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7283 	       sizeof(struct hci_ev_conn_complete)),
7284 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7285 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7286 	       sizeof(struct hci_ev_conn_request)),
7287 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7288 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7289 	       sizeof(struct hci_ev_disconn_complete)),
7290 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7291 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7292 	       sizeof(struct hci_ev_auth_complete)),
7293 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7294 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7295 	       sizeof(struct hci_ev_remote_name)),
7296 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7297 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7298 	       sizeof(struct hci_ev_encrypt_change)),
7299 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7300 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7301 	       hci_change_link_key_complete_evt,
7302 	       sizeof(struct hci_ev_change_link_key_complete)),
7303 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7304 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7305 	       sizeof(struct hci_ev_remote_features)),
7306 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7307 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7308 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7309 	/* [0x0f = HCI_EV_CMD_STATUS] */
7310 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7311 		   sizeof(struct hci_ev_cmd_status)),
7312 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7313 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7314 	       sizeof(struct hci_ev_hardware_error)),
7315 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7316 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7317 	       sizeof(struct hci_ev_role_change)),
7318 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7319 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7320 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7321 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7322 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7323 	       sizeof(struct hci_ev_mode_change)),
7324 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7325 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7326 	       sizeof(struct hci_ev_pin_code_req)),
7327 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7328 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7329 	       sizeof(struct hci_ev_link_key_req)),
7330 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7331 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7332 	       sizeof(struct hci_ev_link_key_notify)),
7333 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7334 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7335 	       sizeof(struct hci_ev_clock_offset)),
7336 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7337 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7338 	       sizeof(struct hci_ev_pkt_type_change)),
7339 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7340 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7341 	       sizeof(struct hci_ev_pscan_rep_mode)),
7342 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7343 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7344 		  hci_inquiry_result_with_rssi_evt,
7345 		  sizeof(struct hci_ev_inquiry_result_rssi),
7346 		  HCI_MAX_EVENT_SIZE),
7347 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7348 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7349 	       sizeof(struct hci_ev_remote_ext_features)),
7350 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7351 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7352 	       sizeof(struct hci_ev_sync_conn_complete)),
7353 	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7354 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7355 		  hci_extended_inquiry_result_evt,
7356 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7357 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7358 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7359 	       sizeof(struct hci_ev_key_refresh_complete)),
7360 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7361 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7362 	       sizeof(struct hci_ev_io_capa_request)),
7363 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7364 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7365 	       sizeof(struct hci_ev_io_capa_reply)),
7366 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7367 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7368 	       sizeof(struct hci_ev_user_confirm_req)),
7369 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7370 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7371 	       sizeof(struct hci_ev_user_passkey_req)),
7372 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7373 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7374 	       sizeof(struct hci_ev_remote_oob_data_request)),
7375 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7376 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7377 	       sizeof(struct hci_ev_simple_pair_complete)),
7378 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7379 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7380 	       sizeof(struct hci_ev_user_passkey_notify)),
7381 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7382 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7383 	       sizeof(struct hci_ev_keypress_notify)),
7384 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7385 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7386 	       sizeof(struct hci_ev_remote_host_features)),
7387 	/* [0x3e = HCI_EV_LE_META] */
7388 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7389 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7390 	/* [0xff = HCI_EV_VENDOR] */
7391 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7392 };
7393 
7394 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7395 			   u16 *opcode, u8 *status,
7396 			   hci_req_complete_t *req_complete,
7397 			   hci_req_complete_skb_t *req_complete_skb)
7398 {
7399 	const struct hci_ev *ev = &hci_ev_table[event];
7400 	void *data;
7401 
7402 	if (!ev->func)
7403 		return;
7404 
7405 	if (skb->len < ev->min_len) {
7406 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7407 			   event, skb->len, ev->min_len);
7408 		return;
7409 	}
7410 
7411 	/* Just warn if the length is over max_len, since it may still be
7412 	 * possible to partially parse the event, so leave it to the
7413 	 * callback to decide whether that is acceptable.
7414 	 */
7415 	if (skb->len > ev->max_len)
7416 		bt_dev_warn_ratelimited(hdev,
7417 					"unexpected event 0x%2.2x length: %u > %u",
7418 					event, skb->len, ev->max_len);
7419 
7420 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7421 	if (!data)
7422 		return;
7423 
7424 	if (ev->req)
7425 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7426 			     req_complete_skb);
7427 	else
7428 		ev->func(hdev, data, skb);
7429 }
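
/* Worked example of the length policy above (illustrative values, using
 * HCI_EV_DISCONN_COMPLETE whose payload is a fixed 4 bytes, so
 * min_len == max_len == 4):
 *
 *	skb->len == 3 -> error logged, handler not called
 *	skb->len == 4 -> exact match, handler called
 *	skb->len == 5 -> rate-limited warning, handler still called with the
 *	                 trailing byte left in the skb for it to judge
 */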
7430 
7431 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7432 {
7433 	struct hci_event_hdr *hdr = (void *) skb->data;
7434 	hci_req_complete_t req_complete = NULL;
7435 	hci_req_complete_skb_t req_complete_skb = NULL;
7436 	struct sk_buff *orig_skb = NULL;
7437 	u8 status = 0, event, req_evt = 0;
7438 	u16 opcode = HCI_OP_NOP;
7439 
7440 	if (skb->len < sizeof(*hdr)) {
7441 		bt_dev_err(hdev, "Malformed HCI Event");
7442 		goto done;
7443 	}
7444 
7445 	kfree_skb(hdev->recv_event);
7446 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7447 
7448 	event = hdr->evt;
7449 	if (!event) {
7450 		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7451 			    event);
7452 		goto done;
7453 	}
7454 
7455 	/* Only match event if command OGF is not for LE */
7456 	if (hdev->req_skb &&
7457 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7458 	    hci_skb_event(hdev->req_skb) == event) {
7459 		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7460 				     status, &req_complete, &req_complete_skb);
7461 		req_evt = event;
7462 	}
7463 
7464 	/* If it looks like we might end up having to call
7465 	 * req_complete_skb, store a pristine copy of the skb since the
7466 	 * various handlers may modify the original one through
7467 	 * skb_pull() calls, etc.
7468 	 */
7469 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7470 	    event == HCI_EV_CMD_COMPLETE)
7471 		orig_skb = skb_clone(skb, GFP_KERNEL);
7472 
7473 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7474 
7475 	/* Store wake reason if we're suspended */
7476 	hci_store_wake_reason(hdev, event, skb);
7477 
7478 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7479 
7480 	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7481 		       &req_complete_skb);
7482 
7483 	if (req_complete) {
7484 		req_complete(hdev, status, opcode);
7485 	} else if (req_complete_skb) {
7486 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7487 			kfree_skb(orig_skb);
7488 			orig_skb = NULL;
7489 		}
7490 		req_complete_skb(hdev, status, opcode, orig_skb);
7491 	}
7492 
7493 done:
7494 	kfree_skb(orig_skb);
7495 	kfree_skb(skb);
7496 	hdev->stat.evt_rx++;
7497 }
7498