xref: /openbmc/linux/net/bluetooth/hci_core.c (revision c8525821)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54 
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58 
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62 
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65 
66 /* Get HCI device by index.
67  * Device is held on return. */
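/* Note: a non-NULL return carries a reference taken with hci_dev_hold();
 * callers are expected to drop it with hci_dev_put() once they are done,
 * as the ioctl helpers below (e.g. hci_inquiry()) do.
 */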
68 struct hci_dev *hci_dev_get(int index)
69 {
70 	struct hci_dev *hdev = NULL, *d;
71 
72 	BT_DBG("%d", index);
73 
74 	if (index < 0)
75 		return NULL;
76 
77 	read_lock(&hci_dev_list_lock);
78 	list_for_each_entry(d, &hci_dev_list, list) {
79 		if (d->id == index) {
80 			hdev = hci_dev_hold(d);
81 			break;
82 		}
83 	}
84 	read_unlock(&hci_dev_list_lock);
85 	return hdev;
86 }
87 
88 /* ---- Inquiry support ---- */
89 
90 bool hci_discovery_active(struct hci_dev *hdev)
91 {
92 	struct discovery_state *discov = &hdev->discovery;
93 
94 	switch (discov->state) {
95 	case DISCOVERY_FINDING:
96 	case DISCOVERY_RESOLVING:
97 		return true;
98 
99 	default:
100 		return false;
101 	}
102 }
103 
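/* Entering DISCOVERY_FINDING reports "discovering" to userspace via
 * mgmt_discovering(), and returning to DISCOVERY_STOPPED (except from
 * DISCOVERY_STARTING) reports it cleared; stopping discovery also
 * re-evaluates passive scanning through hci_update_passive_scan().
 */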
104 void hci_discovery_set_state(struct hci_dev *hdev, int state)
105 {
106 	int old_state = hdev->discovery.state;
107 
108 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
109 
110 	if (old_state == state)
111 		return;
112 
113 	hdev->discovery.state = state;
114 
115 	switch (state) {
116 	case DISCOVERY_STOPPED:
117 		hci_update_passive_scan(hdev);
118 
119 		if (old_state != DISCOVERY_STARTING)
120 			mgmt_discovering(hdev, 0);
121 		break;
122 	case DISCOVERY_STARTING:
123 		break;
124 	case DISCOVERY_FINDING:
125 		mgmt_discovering(hdev, 1);
126 		break;
127 	case DISCOVERY_RESOLVING:
128 		break;
129 	case DISCOVERY_STOPPING:
130 		break;
131 	}
132 }
133 
134 void hci_inquiry_cache_flush(struct hci_dev *hdev)
135 {
136 	struct discovery_state *cache = &hdev->discovery;
137 	struct inquiry_entry *p, *n;
138 
139 	list_for_each_entry_safe(p, n, &cache->all, all) {
140 		list_del(&p->all);
141 		kfree(p);
142 	}
143 
144 	INIT_LIST_HEAD(&cache->unknown);
145 	INIT_LIST_HEAD(&cache->resolve);
146 }
147 
148 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
149 					       bdaddr_t *bdaddr)
150 {
151 	struct discovery_state *cache = &hdev->discovery;
152 	struct inquiry_entry *e;
153 
154 	BT_DBG("cache %p, %pMR", cache, bdaddr);
155 
156 	list_for_each_entry(e, &cache->all, all) {
157 		if (!bacmp(&e->data.bdaddr, bdaddr))
158 			return e;
159 	}
160 
161 	return NULL;
162 }
163 
164 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
165 						       bdaddr_t *bdaddr)
166 {
167 	struct discovery_state *cache = &hdev->discovery;
168 	struct inquiry_entry *e;
169 
170 	BT_DBG("cache %p, %pMR", cache, bdaddr);
171 
172 	list_for_each_entry(e, &cache->unknown, list) {
173 		if (!bacmp(&e->data.bdaddr, bdaddr))
174 			return e;
175 	}
176 
177 	return NULL;
178 }
179 
180 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
181 						       bdaddr_t *bdaddr,
182 						       int state)
183 {
184 	struct discovery_state *cache = &hdev->discovery;
185 	struct inquiry_entry *e;
186 
187 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
188 
189 	list_for_each_entry(e, &cache->resolve, list) {
190 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
191 			return e;
192 		if (!bacmp(&e->data.bdaddr, bdaddr))
193 			return e;
194 	}
195 
196 	return NULL;
197 }
198 
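/* Re-inserts @ie into the resolve list so the list stays ordered by
 * ascending |RSSI| (strongest responses first); entries whose name
 * resolution is already pending are left in place ahead of it.
 */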
199 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
200 				      struct inquiry_entry *ie)
201 {
202 	struct discovery_state *cache = &hdev->discovery;
203 	struct list_head *pos = &cache->resolve;
204 	struct inquiry_entry *p;
205 
206 	list_del(&ie->list);
207 
208 	list_for_each_entry(p, &cache->resolve, list) {
209 		if (p->name_state != NAME_PENDING &&
210 		    abs(p->data.rssi) >= abs(ie->data.rssi))
211 			break;
212 		pos = &p->list;
213 	}
214 
215 	list_add(&ie->list, pos);
216 }
217 
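/* Returns MGMT_DEV_FOUND_* flags for the mgmt Device Found event:
 * LEGACY_PAIRING when neither the new data nor the cached entry
 * indicates SSP support, and CONFIRM_NAME when the remote name is
 * still unknown (or a new cache entry could not be allocated).
 */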
218 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
219 			     bool name_known)
220 {
221 	struct discovery_state *cache = &hdev->discovery;
222 	struct inquiry_entry *ie;
223 	u32 flags = 0;
224 
225 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
226 
227 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
228 
229 	if (!data->ssp_mode)
230 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
231 
232 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
233 	if (ie) {
234 		if (!ie->data.ssp_mode)
235 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
236 
237 		if (ie->name_state == NAME_NEEDED &&
238 		    data->rssi != ie->data.rssi) {
239 			ie->data.rssi = data->rssi;
240 			hci_inquiry_cache_update_resolve(hdev, ie);
241 		}
242 
243 		goto update;
244 	}
245 
246 	/* Entry not in the cache. Add new one. */
247 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
248 	if (!ie) {
249 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
250 		goto done;
251 	}
252 
253 	list_add(&ie->all, &cache->all);
254 
255 	if (name_known) {
256 		ie->name_state = NAME_KNOWN;
257 	} else {
258 		ie->name_state = NAME_NOT_KNOWN;
259 		list_add(&ie->list, &cache->unknown);
260 	}
261 
262 update:
263 	if (name_known && ie->name_state != NAME_KNOWN &&
264 	    ie->name_state != NAME_PENDING) {
265 		ie->name_state = NAME_KNOWN;
266 		list_del(&ie->list);
267 	}
268 
269 	memcpy(&ie->data, data, sizeof(*data));
270 	ie->timestamp = jiffies;
271 	cache->timestamp = jiffies;
272 
273 	if (ie->name_state == NAME_NOT_KNOWN)
274 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
275 
276 done:
277 	return flags;
278 }
279 
280 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
281 {
282 	struct discovery_state *cache = &hdev->discovery;
283 	struct inquiry_info *info = (struct inquiry_info *) buf;
284 	struct inquiry_entry *e;
285 	int copied = 0;
286 
287 	list_for_each_entry(e, &cache->all, all) {
288 		struct inquiry_data *data = &e->data;
289 
290 		if (copied >= num)
291 			break;
292 
293 		bacpy(&info->bdaddr, &data->bdaddr);
294 		info->pscan_rep_mode	= data->pscan_rep_mode;
295 		info->pscan_period_mode	= data->pscan_period_mode;
296 		info->pscan_mode	= data->pscan_mode;
297 		memcpy(info->dev_class, data->dev_class, 3);
298 		info->clock_offset	= data->clock_offset;
299 
300 		info++;
301 		copied++;
302 	}
303 
304 	BT_DBG("cache %p, copied %d", cache, copied);
305 	return copied;
306 }
307 
308 static int hci_inq_req(struct hci_request *req, unsigned long opt)
309 {
310 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
311 	struct hci_dev *hdev = req->hdev;
312 	struct hci_cp_inquiry cp;
313 
314 	BT_DBG("%s", hdev->name);
315 
316 	if (test_bit(HCI_INQUIRY, &hdev->flags))
317 		return 0;
318 
319 	/* Start Inquiry */
320 	memcpy(&cp.lap, &ir->lap, 3);
321 	cp.length  = ir->length;
322 	cp.num_rsp = ir->num_rsp;
323 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
324 
325 	return 0;
326 }
327 
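/* Inquiry ioctl helper (HCIINQUIRY). The user buffer starts with a
 * struct hci_inquiry_req and is followed by the returned struct
 * inquiry_info entries copied from the inquiry cache (at most num_rsp,
 * or 255 when num_rsp is 0).
 */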
328 int hci_inquiry(void __user *arg)
329 {
330 	__u8 __user *ptr = arg;
331 	struct hci_inquiry_req ir;
332 	struct hci_dev *hdev;
333 	int err = 0, do_inquiry = 0, max_rsp;
334 	long timeo;
335 	__u8 *buf;
336 
337 	if (copy_from_user(&ir, ptr, sizeof(ir)))
338 		return -EFAULT;
339 
340 	hdev = hci_dev_get(ir.dev_id);
341 	if (!hdev)
342 		return -ENODEV;
343 
344 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
345 		err = -EBUSY;
346 		goto done;
347 	}
348 
349 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
350 		err = -EOPNOTSUPP;
351 		goto done;
352 	}
353 
354 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
355 		err = -EOPNOTSUPP;
356 		goto done;
357 	}
358 
359 	/* Restrict maximum inquiry length to 60 seconds */
360 	if (ir.length > 60) {
361 		err = -EINVAL;
362 		goto done;
363 	}
364 
365 	hci_dev_lock(hdev);
366 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
367 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
368 		hci_inquiry_cache_flush(hdev);
369 		do_inquiry = 1;
370 	}
371 	hci_dev_unlock(hdev);
372 
373 	timeo = ir.length * msecs_to_jiffies(2000);
374 
375 	if (do_inquiry) {
376 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
377 				   timeo, NULL);
378 		if (err < 0)
379 			goto done;
380 
381 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
382 		 * cleared). If it is interrupted by a signal, return -EINTR.
383 		 */
384 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
385 				TASK_INTERRUPTIBLE)) {
386 			err = -EINTR;
387 			goto done;
388 		}
389 	}
390 
391 	/* For an unlimited number of responses we will use a buffer with
392 	 * 255 entries.
393 	 */
394 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
395 
396 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
397 	 * copy it to user space.
398 	 */
399 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
400 	if (!buf) {
401 		err = -ENOMEM;
402 		goto done;
403 	}
404 
405 	hci_dev_lock(hdev);
406 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
407 	hci_dev_unlock(hdev);
408 
409 	BT_DBG("num_rsp %d", ir.num_rsp);
410 
411 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
412 		ptr += sizeof(ir);
413 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
414 				 ir.num_rsp))
415 			err = -EFAULT;
416 	} else
417 		err = -EFAULT;
418 
419 	kfree(buf);
420 
421 done:
422 	hci_dev_put(hdev);
423 	return err;
424 }
425 
426 static int hci_dev_do_open(struct hci_dev *hdev)
427 {
428 	int ret = 0;
429 
430 	BT_DBG("%s %p", hdev->name, hdev);
431 
432 	hci_req_sync_lock(hdev);
433 
434 	ret = hci_dev_open_sync(hdev);
435 
436 	hci_req_sync_unlock(hdev);
437 	return ret;
438 }
439 
440 /* ---- HCI ioctl helpers ---- */
441 
442 int hci_dev_open(__u16 dev)
443 {
444 	struct hci_dev *hdev;
445 	int err;
446 
447 	hdev = hci_dev_get(dev);
448 	if (!hdev)
449 		return -ENODEV;
450 
451 	/* Devices that are marked as unconfigured can only be powered
452 	 * up as user channel. Trying to bring them up as normal devices
453 	 * will result in a failure. Only user channel operation is
454 	 * possible.
455 	 *
456 	 * When this function is called for a user channel, the flag
457 	 * HCI_USER_CHANNEL will be set first before attempting to
458 	 * open the device.
459 	 */
460 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
461 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
462 		err = -EOPNOTSUPP;
463 		goto done;
464 	}
465 
466 	/* We need to ensure that no other power on/off work is pending
467 	 * before proceeding to call hci_dev_do_open. This is
468 	 * particularly important if the setup procedure has not yet
469 	 * completed.
470 	 */
471 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
472 		cancel_delayed_work(&hdev->power_off);
473 
474 	/* After this call it is guaranteed that the setup procedure
475 	 * has finished. This means that error conditions like RFKILL
476 	 * or no valid public or static random address apply.
477 	 */
478 	flush_workqueue(hdev->req_workqueue);
479 
480 	/* For controllers not using the management interface and that
481 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
482 	 * so that pairing works for them. Once the management interface
483 	 * is in use this bit will be cleared again and userspace has
484 	 * to explicitly enable it.
485 	 */
486 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
487 	    !hci_dev_test_flag(hdev, HCI_MGMT))
488 		hci_dev_set_flag(hdev, HCI_BONDABLE);
489 
490 	err = hci_dev_do_open(hdev);
491 
492 done:
493 	hci_dev_put(hdev);
494 	return err;
495 }
496 
497 int hci_dev_do_close(struct hci_dev *hdev)
498 {
499 	int err;
500 
501 	BT_DBG("%s %p", hdev->name, hdev);
502 
503 	hci_req_sync_lock(hdev);
504 
505 	err = hci_dev_close_sync(hdev);
506 
507 	hci_req_sync_unlock(hdev);
508 
509 	return err;
510 }
511 
512 int hci_dev_close(__u16 dev)
513 {
514 	struct hci_dev *hdev;
515 	int err;
516 
517 	hdev = hci_dev_get(dev);
518 	if (!hdev)
519 		return -ENODEV;
520 
521 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
522 		err = -EBUSY;
523 		goto done;
524 	}
525 
526 	cancel_work_sync(&hdev->power_on);
527 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
528 		cancel_delayed_work(&hdev->power_off);
529 
530 	err = hci_dev_do_close(hdev);
531 
532 done:
533 	hci_dev_put(hdev);
534 	return err;
535 }
536 
537 static int hci_dev_do_reset(struct hci_dev *hdev)
538 {
539 	int ret;
540 
541 	BT_DBG("%s %p", hdev->name, hdev);
542 
543 	hci_req_sync_lock(hdev);
544 
545 	/* Drop queues */
546 	skb_queue_purge(&hdev->rx_q);
547 	skb_queue_purge(&hdev->cmd_q);
548 
549 	/* Cancel these to avoid queueing non-chained pending work */
550 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
551 	/* Wait for
552 	 *
553 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
554 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
555 	 *
556 	 * inside RCU section to see the flag or complete scheduling.
557 	 */
558 	synchronize_rcu();
559 	/* Explicitly cancel works in case scheduled after setting the flag. */
560 	cancel_delayed_work(&hdev->cmd_timer);
561 	cancel_delayed_work(&hdev->ncmd_timer);
562 
563 	/* Avoid potential lockdep warnings from the *_flush() calls by
564 	 * ensuring the workqueue is empty up front.
565 	 */
566 	drain_workqueue(hdev->workqueue);
567 
568 	hci_dev_lock(hdev);
569 	hci_inquiry_cache_flush(hdev);
570 	hci_conn_hash_flush(hdev);
571 	hci_dev_unlock(hdev);
572 
573 	if (hdev->flush)
574 		hdev->flush(hdev);
575 
576 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
577 
578 	atomic_set(&hdev->cmd_cnt, 1);
579 	hdev->acl_cnt = 0;
580 	hdev->sco_cnt = 0;
581 	hdev->le_cnt = 0;
582 	hdev->iso_cnt = 0;
583 
584 	ret = hci_reset_sync(hdev);
585 
586 	hci_req_sync_unlock(hdev);
587 	return ret;
588 }
589 
590 int hci_dev_reset(__u16 dev)
591 {
592 	struct hci_dev *hdev;
593 	int err;
594 
595 	hdev = hci_dev_get(dev);
596 	if (!hdev)
597 		return -ENODEV;
598 
599 	if (!test_bit(HCI_UP, &hdev->flags)) {
600 		err = -ENETDOWN;
601 		goto done;
602 	}
603 
604 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
605 		err = -EBUSY;
606 		goto done;
607 	}
608 
609 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
610 		err = -EOPNOTSUPP;
611 		goto done;
612 	}
613 
614 	err = hci_dev_do_reset(hdev);
615 
616 done:
617 	hci_dev_put(hdev);
618 	return err;
619 }
620 
621 int hci_dev_reset_stat(__u16 dev)
622 {
623 	struct hci_dev *hdev;
624 	int ret = 0;
625 
626 	hdev = hci_dev_get(dev);
627 	if (!hdev)
628 		return -ENODEV;
629 
630 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
631 		ret = -EBUSY;
632 		goto done;
633 	}
634 
635 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
636 		ret = -EOPNOTSUPP;
637 		goto done;
638 	}
639 
640 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
641 
642 done:
643 	hci_dev_put(hdev);
644 	return ret;
645 }
646 
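/* Keeps the HCI_CONNECTABLE/HCI_DISCOVERABLE device flags in sync with a
 * scan-enable value written through the legacy HCISETSCAN ioctl, and
 * notifies mgmt of the resulting settings change when mgmt is in use.
 */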
647 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
648 {
649 	bool conn_changed, discov_changed;
650 
651 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
652 
653 	if ((scan & SCAN_PAGE))
654 		conn_changed = !hci_dev_test_and_set_flag(hdev,
655 							  HCI_CONNECTABLE);
656 	else
657 		conn_changed = hci_dev_test_and_clear_flag(hdev,
658 							   HCI_CONNECTABLE);
659 
660 	if ((scan & SCAN_INQUIRY)) {
661 		discov_changed = !hci_dev_test_and_set_flag(hdev,
662 							    HCI_DISCOVERABLE);
663 	} else {
664 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
665 		discov_changed = hci_dev_test_and_clear_flag(hdev,
666 							     HCI_DISCOVERABLE);
667 	}
668 
669 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
670 		return;
671 
672 	if (conn_changed || discov_changed) {
673 		/* In case this was disabled through mgmt */
674 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
675 
676 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
677 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
678 
679 		mgmt_new_settings(hdev);
680 	}
681 }
682 
683 int hci_dev_cmd(unsigned int cmd, void __user *arg)
684 {
685 	struct hci_dev *hdev;
686 	struct hci_dev_req dr;
687 	__le16 policy;
688 	int err = 0;
689 
690 	if (copy_from_user(&dr, arg, sizeof(dr)))
691 		return -EFAULT;
692 
693 	hdev = hci_dev_get(dr.dev_id);
694 	if (!hdev)
695 		return -ENODEV;
696 
697 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
698 		err = -EBUSY;
699 		goto done;
700 	}
701 
702 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
703 		err = -EOPNOTSUPP;
704 		goto done;
705 	}
706 
707 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
708 		err = -EOPNOTSUPP;
709 		goto done;
710 	}
711 
712 	switch (cmd) {
713 	case HCISETAUTH:
714 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
715 					    1, &dr.dev_opt, HCI_CMD_TIMEOUT);
716 		break;
717 
718 	case HCISETENCRYPT:
719 		if (!lmp_encrypt_capable(hdev)) {
720 			err = -EOPNOTSUPP;
721 			break;
722 		}
723 
724 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
725 			/* Auth must be enabled first */
726 			err = __hci_cmd_sync_status(hdev,
727 						    HCI_OP_WRITE_AUTH_ENABLE,
728 						    1, &dr.dev_opt,
729 						    HCI_CMD_TIMEOUT);
730 			if (err)
731 				break;
732 		}
733 
734 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
735 					    1, &dr.dev_opt,
736 					    HCI_CMD_TIMEOUT);
737 		break;
738 
739 	case HCISETSCAN:
740 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
741 					    1, &dr.dev_opt,
742 					    HCI_CMD_TIMEOUT);
743 
744 		/* Ensure that the connectable and discoverable states
745 		 * get correctly modified as this was a non-mgmt change.
746 		 */
747 		if (!err)
748 			hci_update_passive_scan_state(hdev, dr.dev_opt);
749 		break;
750 
751 	case HCISETLINKPOL:
752 		policy = cpu_to_le16(dr.dev_opt);
753 
754 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
755 					    2, &policy,
756 					    HCI_CMD_TIMEOUT);
757 		break;
758 
759 	case HCISETLINKMODE:
760 		hdev->link_mode = ((__u16) dr.dev_opt) &
761 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
762 		break;
763 
764 	case HCISETPTYPE:
765 		if (hdev->pkt_type == (__u16) dr.dev_opt)
766 			break;
767 
768 		hdev->pkt_type = (__u16) dr.dev_opt;
769 		mgmt_phy_configuration_changed(hdev, NULL);
770 		break;
771 
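	/* For HCISETACLMTU/HCISETSCOMTU userspace packs two 16-bit values
	 * into dev_opt; the first half is taken as the packet count and
	 * the second as the MTU (see the pointer arithmetic below).
	 */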
772 	case HCISETACLMTU:
773 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
774 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
775 		break;
776 
777 	case HCISETSCOMTU:
778 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
779 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
780 		break;
781 
782 	default:
783 		err = -EINVAL;
784 		break;
785 	}
786 
787 done:
788 	hci_dev_put(hdev);
789 	return err;
790 }
791 
792 int hci_get_dev_list(void __user *arg)
793 {
794 	struct hci_dev *hdev;
795 	struct hci_dev_list_req *dl;
796 	struct hci_dev_req *dr;
797 	int n = 0, size, err;
798 	__u16 dev_num;
799 
800 	if (get_user(dev_num, (__u16 __user *) arg))
801 		return -EFAULT;
802 
803 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
804 		return -EINVAL;
805 
806 	size = sizeof(*dl) + dev_num * sizeof(*dr);
807 
808 	dl = kzalloc(size, GFP_KERNEL);
809 	if (!dl)
810 		return -ENOMEM;
811 
812 	dr = dl->dev_req;
813 
814 	read_lock(&hci_dev_list_lock);
815 	list_for_each_entry(hdev, &hci_dev_list, list) {
816 		unsigned long flags = hdev->flags;
817 
818 		/* When the auto-off is configured it means the transport
819 		 * is running, but in that case still indicate that the
820 		 * device is actually down.
821 		 */
822 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
823 			flags &= ~BIT(HCI_UP);
824 
825 		(dr + n)->dev_id  = hdev->id;
826 		(dr + n)->dev_opt = flags;
827 
828 		if (++n >= dev_num)
829 			break;
830 	}
831 	read_unlock(&hci_dev_list_lock);
832 
833 	dl->dev_num = n;
834 	size = sizeof(*dl) + n * sizeof(*dr);
835 
836 	err = copy_to_user(arg, dl, size);
837 	kfree(dl);
838 
839 	return err ? -EFAULT : 0;
840 }
841 
842 int hci_get_dev_info(void __user *arg)
843 {
844 	struct hci_dev *hdev;
845 	struct hci_dev_info di;
846 	unsigned long flags;
847 	int err = 0;
848 
849 	if (copy_from_user(&di, arg, sizeof(di)))
850 		return -EFAULT;
851 
852 	hdev = hci_dev_get(di.dev_id);
853 	if (!hdev)
854 		return -ENODEV;
855 
856 	/* When the auto-off is configured it means the transport
857 	 * is running, but in that case still indicate that the
858 	 * device is actually down.
859 	 */
860 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
861 		flags = hdev->flags & ~BIT(HCI_UP);
862 	else
863 		flags = hdev->flags;
864 
865 	strscpy(di.name, hdev->name, sizeof(di.name));
866 	di.bdaddr   = hdev->bdaddr;
867 	di.type     = (hdev->bus & 0x0f);
868 	di.flags    = flags;
869 	di.pkt_type = hdev->pkt_type;
870 	if (lmp_bredr_capable(hdev)) {
871 		di.acl_mtu  = hdev->acl_mtu;
872 		di.acl_pkts = hdev->acl_pkts;
873 		di.sco_mtu  = hdev->sco_mtu;
874 		di.sco_pkts = hdev->sco_pkts;
875 	} else {
876 		di.acl_mtu  = hdev->le_mtu;
877 		di.acl_pkts = hdev->le_pkts;
878 		di.sco_mtu  = 0;
879 		di.sco_pkts = 0;
880 	}
881 	di.link_policy = hdev->link_policy;
882 	di.link_mode   = hdev->link_mode;
883 
884 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
885 	memcpy(&di.features, &hdev->features, sizeof(di.features));
886 
887 	if (copy_to_user(arg, &di, sizeof(di)))
888 		err = -EFAULT;
889 
890 	hci_dev_put(hdev);
891 
892 	return err;
893 }
894 
895 /* ---- Interface to HCI drivers ---- */
896 
897 static int hci_rfkill_set_block(void *data, bool blocked)
898 {
899 	struct hci_dev *hdev = data;
900 
901 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
902 
903 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
904 		return -EBUSY;
905 
906 	if (blocked) {
907 		hci_dev_set_flag(hdev, HCI_RFKILLED);
908 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
909 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
910 			hci_dev_do_close(hdev);
911 	} else {
912 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
913 	}
914 
915 	return 0;
916 }
917 
918 static const struct rfkill_ops hci_rfkill_ops = {
919 	.set_block = hci_rfkill_set_block,
920 };
921 
922 static void hci_power_on(struct work_struct *work)
923 {
924 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
925 	int err;
926 
927 	BT_DBG("%s", hdev->name);
928 
929 	if (test_bit(HCI_UP, &hdev->flags) &&
930 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
931 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
932 		cancel_delayed_work(&hdev->power_off);
933 		err = hci_powered_update_sync(hdev);
934 		mgmt_power_on(hdev, err);
935 		return;
936 	}
937 
938 	err = hci_dev_do_open(hdev);
939 	if (err < 0) {
940 		hci_dev_lock(hdev);
941 		mgmt_set_powered_failed(hdev, err);
942 		hci_dev_unlock(hdev);
943 		return;
944 	}
945 
946 	/* During the HCI setup phase, a few error conditions are
947 	 * ignored and they need to be checked now. If they are still
948 	 * valid, it is important to turn the device back off.
949 	 */
950 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
951 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
952 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
953 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
954 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
955 		hci_dev_do_close(hdev);
956 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
957 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
958 				   HCI_AUTO_OFF_TIMEOUT);
959 	}
960 
961 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
962 		/* For unconfigured devices, set the HCI_RAW flag
963 		 * so that userspace can easily identify them.
964 		 */
965 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
966 			set_bit(HCI_RAW, &hdev->flags);
967 
968 		/* For fully configured devices, this will send
969 		 * the Index Added event. For unconfigured devices,
970 		 * it will send an Unconfigured Index Added event.
971 		 *
972 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
973 		 * and no event will be sent.
974 		 */
975 		mgmt_index_added(hdev);
976 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
977 		/* When the controller is now configured, then it
978 		 * is important to clear the HCI_RAW flag.
979 		 */
980 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
981 			clear_bit(HCI_RAW, &hdev->flags);
982 
983 		/* Powering on the controller with HCI_CONFIG set only
984 		 * happens with the transition from unconfigured to
985 		 * configured. This will send the Index Added event.
986 		 */
987 		mgmt_index_added(hdev);
988 	}
989 }
990 
991 static void hci_power_off(struct work_struct *work)
992 {
993 	struct hci_dev *hdev = container_of(work, struct hci_dev,
994 					    power_off.work);
995 
996 	BT_DBG("%s", hdev->name);
997 
998 	hci_dev_do_close(hdev);
999 }
1000 
1001 static void hci_error_reset(struct work_struct *work)
1002 {
1003 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1004 
1005 	hci_dev_hold(hdev);
1006 	BT_DBG("%s", hdev->name);
1007 
1008 	if (hdev->hw_error)
1009 		hdev->hw_error(hdev, hdev->hw_error_code);
1010 	else
1011 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1012 
1013 	if (!hci_dev_do_close(hdev))
1014 		hci_dev_do_open(hdev);
1015 
1016 	hci_dev_put(hdev);
1017 }
1018 
1019 void hci_uuids_clear(struct hci_dev *hdev)
1020 {
1021 	struct bt_uuid *uuid, *tmp;
1022 
1023 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1024 		list_del(&uuid->list);
1025 		kfree(uuid);
1026 	}
1027 }
1028 
1029 void hci_link_keys_clear(struct hci_dev *hdev)
1030 {
1031 	struct link_key *key, *tmp;
1032 
1033 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1034 		list_del_rcu(&key->list);
1035 		kfree_rcu(key, rcu);
1036 	}
1037 }
1038 
1039 void hci_smp_ltks_clear(struct hci_dev *hdev)
1040 {
1041 	struct smp_ltk *k, *tmp;
1042 
1043 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1044 		list_del_rcu(&k->list);
1045 		kfree_rcu(k, rcu);
1046 	}
1047 }
1048 
1049 void hci_smp_irks_clear(struct hci_dev *hdev)
1050 {
1051 	struct smp_irk *k, *tmp;
1052 
1053 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1054 		list_del_rcu(&k->list);
1055 		kfree_rcu(k, rcu);
1056 	}
1057 }
1058 
1059 void hci_blocked_keys_clear(struct hci_dev *hdev)
1060 {
1061 	struct blocked_key *b, *tmp;
1062 
1063 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1064 		list_del_rcu(&b->list);
1065 		kfree_rcu(b, rcu);
1066 	}
1067 }
1068 
1069 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1070 {
1071 	bool blocked = false;
1072 	struct blocked_key *b;
1073 
1074 	rcu_read_lock();
1075 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1076 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1077 			blocked = true;
1078 			break;
1079 		}
1080 	}
1081 
1082 	rcu_read_unlock();
1083 	return blocked;
1084 }
1085 
1086 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1087 {
1088 	struct link_key *k;
1089 
1090 	rcu_read_lock();
1091 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1092 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1093 			rcu_read_unlock();
1094 
1095 			if (hci_is_blocked_key(hdev,
1096 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1097 					       k->val)) {
1098 				bt_dev_warn_ratelimited(hdev,
1099 							"Link key blocked for %pMR",
1100 							&k->bdaddr);
1101 				return NULL;
1102 			}
1103 
1104 			return k;
1105 		}
1106 	}
1107 	rcu_read_unlock();
1108 
1109 	return NULL;
1110 }
1111 
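/* Decides whether a new link key should be stored persistently: legacy
 * and SC-derived keys generally are, debug keys never are, and for SSP
 * the decision depends on the local and remote bonding requirements.
 */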
1112 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1113 			       u8 key_type, u8 old_key_type)
1114 {
1115 	/* Legacy key */
1116 	if (key_type < 0x03)
1117 		return true;
1118 
1119 	/* Debug keys are insecure so don't store them persistently */
1120 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1121 		return false;
1122 
1123 	/* Changed combination key and there's no previous one */
1124 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1125 		return false;
1126 
1127 	/* Security mode 3 case */
1128 	if (!conn)
1129 		return true;
1130 
1131 	/* BR/EDR key derived using SC from an LE link */
1132 	if (conn->type == LE_LINK)
1133 		return true;
1134 
1135 	/* Neither local nor remote side had no-bonding as requirement */
1136 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1137 		return true;
1138 
1139 	/* Local side had dedicated bonding as requirement */
1140 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1141 		return true;
1142 
1143 	/* Remote side had dedicated bonding as requirement */
1144 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1145 		return true;
1146 
1147 	/* If none of the above criteria match, then don't store the key
1148 	 * persistently */
1149 	return false;
1150 }
1151 
1152 static u8 ltk_role(u8 type)
1153 {
1154 	if (type == SMP_LTK)
1155 		return HCI_ROLE_MASTER;
1156 
1157 	return HCI_ROLE_SLAVE;
1158 }
1159 
1160 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1161 			     u8 addr_type, u8 role)
1162 {
1163 	struct smp_ltk *k;
1164 
1165 	rcu_read_lock();
1166 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1167 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1168 			continue;
1169 
1170 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1171 			rcu_read_unlock();
1172 
1173 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1174 					       k->val)) {
1175 				bt_dev_warn_ratelimited(hdev,
1176 							"LTK blocked for %pMR",
1177 							&k->bdaddr);
1178 				return NULL;
1179 			}
1180 
1181 			return k;
1182 		}
1183 	}
1184 	rcu_read_unlock();
1185 
1186 	return NULL;
1187 }
1188 
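/* Resolves a Resolvable Private Address in two passes: first against RPAs
 * already cached in the IRK entries, then by running smp_irk_matches()
 * against each stored IRK and caching the matching RPA. Blocked identity
 * keys are never returned.
 */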
1189 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1190 {
1191 	struct smp_irk *irk_to_return = NULL;
1192 	struct smp_irk *irk;
1193 
1194 	rcu_read_lock();
1195 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1196 		if (!bacmp(&irk->rpa, rpa)) {
1197 			irk_to_return = irk;
1198 			goto done;
1199 		}
1200 	}
1201 
1202 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1203 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1204 			bacpy(&irk->rpa, rpa);
1205 			irk_to_return = irk;
1206 			goto done;
1207 		}
1208 	}
1209 
1210 done:
1211 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1212 						irk_to_return->val)) {
1213 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1214 					&irk_to_return->bdaddr);
1215 		irk_to_return = NULL;
1216 	}
1217 
1218 	rcu_read_unlock();
1219 
1220 	return irk_to_return;
1221 }
1222 
1223 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1224 				     u8 addr_type)
1225 {
1226 	struct smp_irk *irk_to_return = NULL;
1227 	struct smp_irk *irk;
1228 
1229 	/* Identity Address must be public or static random */
1230 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1231 		return NULL;
1232 
1233 	rcu_read_lock();
1234 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1235 		if (addr_type == irk->addr_type &&
1236 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1237 			irk_to_return = irk;
1238 			goto done;
1239 		}
1240 	}
1241 
1242 done:
1243 
1244 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1245 						irk_to_return->val)) {
1246 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1247 					&irk_to_return->bdaddr);
1248 		irk_to_return = NULL;
1249 	}
1250 
1251 	rcu_read_unlock();
1252 
1253 	return irk_to_return;
1254 }
1255 
1256 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1257 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1258 				  u8 pin_len, bool *persistent)
1259 {
1260 	struct link_key *key, *old_key;
1261 	u8 old_key_type;
1262 
1263 	old_key = hci_find_link_key(hdev, bdaddr);
1264 	if (old_key) {
1265 		old_key_type = old_key->type;
1266 		key = old_key;
1267 	} else {
1268 		old_key_type = conn ? conn->key_type : 0xff;
1269 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1270 		if (!key)
1271 			return NULL;
1272 		list_add_rcu(&key->list, &hdev->link_keys);
1273 	}
1274 
1275 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1276 
1277 	/* Some buggy controller combinations generate a changed
1278 	 * combination key for legacy pairing even when there's no
1279 	 * previous key */
1280 	if (type == HCI_LK_CHANGED_COMBINATION &&
1281 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1282 		type = HCI_LK_COMBINATION;
1283 		if (conn)
1284 			conn->key_type = type;
1285 	}
1286 
1287 	bacpy(&key->bdaddr, bdaddr);
1288 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1289 	key->pin_len = pin_len;
1290 
1291 	if (type == HCI_LK_CHANGED_COMBINATION)
1292 		key->type = old_key_type;
1293 	else
1294 		key->type = type;
1295 
1296 	if (persistent)
1297 		*persistent = hci_persistent_key(hdev, conn, type,
1298 						 old_key_type);
1299 
1300 	return key;
1301 }
1302 
1303 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1304 			    u8 addr_type, u8 type, u8 authenticated,
1305 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1306 {
1307 	struct smp_ltk *key, *old_key;
1308 	u8 role = ltk_role(type);
1309 
1310 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1311 	if (old_key)
1312 		key = old_key;
1313 	else {
1314 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1315 		if (!key)
1316 			return NULL;
1317 		list_add_rcu(&key->list, &hdev->long_term_keys);
1318 	}
1319 
1320 	bacpy(&key->bdaddr, bdaddr);
1321 	key->bdaddr_type = addr_type;
1322 	memcpy(key->val, tk, sizeof(key->val));
1323 	key->authenticated = authenticated;
1324 	key->ediv = ediv;
1325 	key->rand = rand;
1326 	key->enc_size = enc_size;
1327 	key->type = type;
1328 
1329 	return key;
1330 }
1331 
1332 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1333 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1334 {
1335 	struct smp_irk *irk;
1336 
1337 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1338 	if (!irk) {
1339 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1340 		if (!irk)
1341 			return NULL;
1342 
1343 		bacpy(&irk->bdaddr, bdaddr);
1344 		irk->addr_type = addr_type;
1345 
1346 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1347 	}
1348 
1349 	memcpy(irk->val, val, 16);
1350 	bacpy(&irk->rpa, rpa);
1351 
1352 	return irk;
1353 }
1354 
1355 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1356 {
1357 	struct link_key *key;
1358 
1359 	key = hci_find_link_key(hdev, bdaddr);
1360 	if (!key)
1361 		return -ENOENT;
1362 
1363 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1364 
1365 	list_del_rcu(&key->list);
1366 	kfree_rcu(key, rcu);
1367 
1368 	return 0;
1369 }
1370 
1371 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1372 {
1373 	struct smp_ltk *k, *tmp;
1374 	int removed = 0;
1375 
1376 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1377 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1378 			continue;
1379 
1380 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1381 
1382 		list_del_rcu(&k->list);
1383 		kfree_rcu(k, rcu);
1384 		removed++;
1385 	}
1386 
1387 	return removed ? 0 : -ENOENT;
1388 }
1389 
1390 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1391 {
1392 	struct smp_irk *k, *tmp;
1393 
1394 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1395 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1396 			continue;
1397 
1398 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1399 
1400 		list_del_rcu(&k->list);
1401 		kfree_rcu(k, rcu);
1402 	}
1403 }
1404 
1405 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1406 {
1407 	struct smp_ltk *k;
1408 	struct smp_irk *irk;
1409 	u8 addr_type;
1410 
1411 	if (type == BDADDR_BREDR) {
1412 		if (hci_find_link_key(hdev, bdaddr))
1413 			return true;
1414 		return false;
1415 	}
1416 
1417 	/* Convert to HCI addr type which struct smp_ltk uses */
1418 	if (type == BDADDR_LE_PUBLIC)
1419 		addr_type = ADDR_LE_DEV_PUBLIC;
1420 	else
1421 		addr_type = ADDR_LE_DEV_RANDOM;
1422 
1423 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1424 	if (irk) {
1425 		bdaddr = &irk->bdaddr;
1426 		addr_type = irk->addr_type;
1427 	}
1428 
1429 	rcu_read_lock();
1430 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1431 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1432 			rcu_read_unlock();
1433 			return true;
1434 		}
1435 	}
1436 	rcu_read_unlock();
1437 
1438 	return false;
1439 }
1440 
1441 /* HCI command timer function */
1442 static void hci_cmd_timeout(struct work_struct *work)
1443 {
1444 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1445 					    cmd_timer.work);
1446 
1447 	if (hdev->req_skb) {
1448 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1449 
1450 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1451 
1452 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1453 	} else {
1454 		bt_dev_err(hdev, "command tx timeout");
1455 	}
1456 
1457 	if (hdev->cmd_timeout)
1458 		hdev->cmd_timeout(hdev);
1459 
1460 	atomic_set(&hdev->cmd_cnt, 1);
1461 	queue_work(hdev->workqueue, &hdev->cmd_work);
1462 }
1463 
1464 /* HCI ncmd timer function */
1465 static void hci_ncmd_timeout(struct work_struct *work)
1466 {
1467 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1468 					    ncmd_timer.work);
1469 
1470 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1471 
1472 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1473 	 * triggers since the procedure has its own timeout handling.
1474 	 */
1475 	if (test_bit(HCI_INIT, &hdev->flags))
1476 		return;
1477 
1478 	/* This is an irrecoverable state, inject hardware error event */
1479 	hci_reset_dev(hdev);
1480 }
1481 
1482 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1483 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1484 {
1485 	struct oob_data *data;
1486 
1487 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1488 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1489 			continue;
1490 		if (data->bdaddr_type != bdaddr_type)
1491 			continue;
1492 		return data;
1493 	}
1494 
1495 	return NULL;
1496 }
1497 
1498 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1499 			       u8 bdaddr_type)
1500 {
1501 	struct oob_data *data;
1502 
1503 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1504 	if (!data)
1505 		return -ENOENT;
1506 
1507 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1508 
1509 	list_del(&data->list);
1510 	kfree(data);
1511 
1512 	return 0;
1513 }
1514 
1515 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1516 {
1517 	struct oob_data *data, *n;
1518 
1519 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1520 		list_del(&data->list);
1521 		kfree(data);
1522 	}
1523 }
1524 
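/* data->present is a bitmask of which OOB values are valid: bit 0 for the
 * P-192 hash/randomizer pair and bit 1 for the P-256 pair, as set up
 * below.
 */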
1525 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1526 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1527 			    u8 *hash256, u8 *rand256)
1528 {
1529 	struct oob_data *data;
1530 
1531 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1532 	if (!data) {
1533 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1534 		if (!data)
1535 			return -ENOMEM;
1536 
1537 		bacpy(&data->bdaddr, bdaddr);
1538 		data->bdaddr_type = bdaddr_type;
1539 		list_add(&data->list, &hdev->remote_oob_data);
1540 	}
1541 
1542 	if (hash192 && rand192) {
1543 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1544 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1545 		if (hash256 && rand256)
1546 			data->present = 0x03;
1547 	} else {
1548 		memset(data->hash192, 0, sizeof(data->hash192));
1549 		memset(data->rand192, 0, sizeof(data->rand192));
1550 		if (hash256 && rand256)
1551 			data->present = 0x02;
1552 		else
1553 			data->present = 0x00;
1554 	}
1555 
1556 	if (hash256 && rand256) {
1557 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1558 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1559 	} else {
1560 		memset(data->hash256, 0, sizeof(data->hash256));
1561 		memset(data->rand256, 0, sizeof(data->rand256));
1562 		if (hash192 && rand192)
1563 			data->present = 0x01;
1564 	}
1565 
1566 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1567 
1568 	return 0;
1569 }
1570 
1571 /* This function requires the caller holds hdev->lock */
1572 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1573 {
1574 	struct adv_info *adv_instance;
1575 
1576 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1577 		if (adv_instance->instance == instance)
1578 			return adv_instance;
1579 	}
1580 
1581 	return NULL;
1582 }
1583 
1584 /* This function requires the caller holds hdev->lock */
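/* Returns the advertising instance following @instance, wrapping from the
 * last entry back to the first, e.g. when rotating between multiple
 * advertising instances.
 */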
1585 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1586 {
1587 	struct adv_info *cur_instance;
1588 
1589 	cur_instance = hci_find_adv_instance(hdev, instance);
1590 	if (!cur_instance)
1591 		return NULL;
1592 
1593 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1594 					    struct adv_info, list))
1595 		return list_first_entry(&hdev->adv_instances,
1596 						 struct adv_info, list);
1597 	else
1598 		return list_next_entry(cur_instance, list);
1599 }
1600 
1601 /* This function requires the caller holds hdev->lock */
1602 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1603 {
1604 	struct adv_info *adv_instance;
1605 
1606 	adv_instance = hci_find_adv_instance(hdev, instance);
1607 	if (!adv_instance)
1608 		return -ENOENT;
1609 
1610 	BT_DBG("%s removing %dMR", hdev->name, instance);
1611 
1612 	if (hdev->cur_adv_instance == instance) {
1613 		if (hdev->adv_instance_timeout) {
1614 			cancel_delayed_work(&hdev->adv_instance_expire);
1615 			hdev->adv_instance_timeout = 0;
1616 		}
1617 		hdev->cur_adv_instance = 0x00;
1618 	}
1619 
1620 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1621 
1622 	list_del(&adv_instance->list);
1623 	kfree(adv_instance);
1624 
1625 	hdev->adv_instance_cnt--;
1626 
1627 	return 0;
1628 }
1629 
1630 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1631 {
1632 	struct adv_info *adv_instance, *n;
1633 
1634 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1635 		adv_instance->rpa_expired = rpa_expired;
1636 }
1637 
1638 /* This function requires the caller holds hdev->lock */
1639 void hci_adv_instances_clear(struct hci_dev *hdev)
1640 {
1641 	struct adv_info *adv_instance, *n;
1642 
1643 	if (hdev->adv_instance_timeout) {
1644 		cancel_delayed_work(&hdev->adv_instance_expire);
1645 		hdev->adv_instance_timeout = 0;
1646 	}
1647 
1648 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1649 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1650 		list_del(&adv_instance->list);
1651 		kfree(adv_instance);
1652 	}
1653 
1654 	hdev->adv_instance_cnt = 0;
1655 	hdev->cur_adv_instance = 0x00;
1656 }
1657 
1658 static void adv_instance_rpa_expired(struct work_struct *work)
1659 {
1660 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1661 						     rpa_expired_cb.work);
1662 
1663 	BT_DBG("");
1664 
1665 	adv_instance->rpa_expired = true;
1666 }
1667 
1668 /* This function requires the caller holds hdev->lock */
1669 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1670 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1671 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1672 				      u16 timeout, u16 duration, s8 tx_power,
1673 				      u32 min_interval, u32 max_interval,
1674 				      u8 mesh_handle)
1675 {
1676 	struct adv_info *adv;
1677 
1678 	adv = hci_find_adv_instance(hdev, instance);
1679 	if (adv) {
1680 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1681 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1682 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1683 	} else {
1684 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1685 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1686 			return ERR_PTR(-EOVERFLOW);
1687 
1688 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1689 		if (!adv)
1690 			return ERR_PTR(-ENOMEM);
1691 
1692 		adv->pending = true;
1693 		adv->instance = instance;
1694 		list_add(&adv->list, &hdev->adv_instances);
1695 		hdev->adv_instance_cnt++;
1696 	}
1697 
1698 	adv->flags = flags;
1699 	adv->min_interval = min_interval;
1700 	adv->max_interval = max_interval;
1701 	adv->tx_power = tx_power;
1702 	/* Defining a mesh_handle changes the timing units to ms,
1703 	 * rather than seconds, and ties the instance to the requested
1704 	 * mesh_tx queue.
1705 	 */
1706 	adv->mesh = mesh_handle;
1707 
1708 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1709 				  scan_rsp_len, scan_rsp_data);
1710 
1711 	adv->timeout = timeout;
1712 	adv->remaining_time = timeout;
1713 
1714 	if (duration == 0)
1715 		adv->duration = hdev->def_multi_adv_rotation_duration;
1716 	else
1717 		adv->duration = duration;
1718 
1719 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1720 
1721 	BT_DBG("%s for %dMR", hdev->name, instance);
1722 
1723 	return adv;
1724 }
1725 
1726 /* This function requires the caller holds hdev->lock */
1727 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1728 				      u32 flags, u8 data_len, u8 *data,
1729 				      u32 min_interval, u32 max_interval)
1730 {
1731 	struct adv_info *adv;
1732 
1733 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1734 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1735 				   min_interval, max_interval, 0);
1736 	if (IS_ERR(adv))
1737 		return adv;
1738 
1739 	adv->periodic = true;
1740 	adv->per_adv_data_len = data_len;
1741 
1742 	if (data)
1743 		memcpy(adv->per_adv_data, data, data_len);
1744 
1745 	return adv;
1746 }
1747 
1748 /* This function requires the caller holds hdev->lock */
1749 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1750 			      u16 adv_data_len, u8 *adv_data,
1751 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1752 {
1753 	struct adv_info *adv;
1754 
1755 	adv = hci_find_adv_instance(hdev, instance);
1756 
1757 	/* If advertisement doesn't exist, we can't modify its data */
1758 	if (!adv)
1759 		return -ENOENT;
1760 
1761 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1762 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1763 		memcpy(adv->adv_data, adv_data, adv_data_len);
1764 		adv->adv_data_len = adv_data_len;
1765 		adv->adv_data_changed = true;
1766 	}
1767 
1768 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1769 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1770 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1771 		adv->scan_rsp_len = scan_rsp_len;
1772 		adv->scan_rsp_changed = true;
1773 	}
1774 
1775 	/* Mark as changed if there are flags which would affect it */
1776 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1777 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1778 		adv->scan_rsp_changed = true;
1779 
1780 	return 0;
1781 }
1782 
1783 /* This function requires the caller holds hdev->lock */
1784 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1785 {
1786 	u32 flags;
1787 	struct adv_info *adv;
1788 
1789 	if (instance == 0x00) {
1790 		/* Instance 0 always manages the "Tx Power" and "Flags"
1791 		 * fields
1792 		 */
1793 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1794 
1795 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1796 		 * corresponds to the "connectable" instance flag.
1797 		 */
1798 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1799 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1800 
1801 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1802 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1803 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1804 			flags |= MGMT_ADV_FLAG_DISCOV;
1805 
1806 		return flags;
1807 	}
1808 
1809 	adv = hci_find_adv_instance(hdev, instance);
1810 
1811 	/* Return 0 when we got an invalid instance identifier. */
1812 	if (!adv)
1813 		return 0;
1814 
1815 	return adv->flags;
1816 }
1817 
1818 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1819 {
1820 	struct adv_info *adv;
1821 
1822 	/* Instance 0x00 always set local name */
1823 	if (instance == 0x00)
1824 		return true;
1825 
1826 	adv = hci_find_adv_instance(hdev, instance);
1827 	if (!adv)
1828 		return false;
1829 
1830 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1831 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1832 		return true;
1833 
1834 	return adv->scan_rsp_len ? true : false;
1835 }
1836 
1837 /* This function requires the caller holds hdev->lock */
1838 void hci_adv_monitors_clear(struct hci_dev *hdev)
1839 {
1840 	struct adv_monitor *monitor;
1841 	int handle;
1842 
1843 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1844 		hci_free_adv_monitor(hdev, monitor);
1845 
1846 	idr_destroy(&hdev->adv_monitors_idr);
1847 }
1848 
1849 /* Frees the monitor structure and does some bookkeeping.
1850  * This function requires the caller holds hdev->lock.
1851  */
1852 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1853 {
1854 	struct adv_pattern *pattern;
1855 	struct adv_pattern *tmp;
1856 
1857 	if (!monitor)
1858 		return;
1859 
1860 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1861 		list_del(&pattern->list);
1862 		kfree(pattern);
1863 	}
1864 
1865 	if (monitor->handle)
1866 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1867 
1868 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1869 		hdev->adv_monitors_cnt--;
1870 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1871 	}
1872 
1873 	kfree(monitor);
1874 }
1875 
1876 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1877  * also attempts to forward the request to the controller.
1878  * This function requires the caller holds hci_req_sync_lock.
1879  */
1880 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1881 {
1882 	int min, max, handle;
1883 	int status = 0;
1884 
1885 	if (!monitor)
1886 		return -EINVAL;
1887 
1888 	hci_dev_lock(hdev);
1889 
1890 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1891 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1892 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1893 			   GFP_KERNEL);
1894 
1895 	hci_dev_unlock(hdev);
1896 
1897 	if (handle < 0)
1898 		return handle;
1899 
1900 	monitor->handle = handle;
1901 
1902 	if (!hdev_is_powered(hdev))
1903 		return status;
1904 
1905 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1906 	case HCI_ADV_MONITOR_EXT_NONE:
1907 		bt_dev_dbg(hdev, "add monitor %d status %d",
1908 			   monitor->handle, status);
1909 		/* Message was not forwarded to controller - not an error */
1910 		break;
1911 
1912 	case HCI_ADV_MONITOR_EXT_MSFT:
1913 		status = msft_add_monitor_pattern(hdev, monitor);
1914 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1915 			   handle, status);
1916 		break;
1917 	}
1918 
1919 	return status;
1920 }
1921 
1922 /* Attempt to tell the controller to remove the monitor and then free it.
1923  * If the controller doesn't have a corresponding handle, free it anyway.
1924  * This function requires the caller holds hci_req_sync_lock.
1925  */
1926 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1927 				  struct adv_monitor *monitor)
1928 {
1929 	int status = 0;
1930 	int handle;
1931 
1932 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1933 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1934 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1935 			   monitor->handle, status);
1936 		goto free_monitor;
1937 
1938 	case HCI_ADV_MONITOR_EXT_MSFT:
1939 		handle = monitor->handle;
1940 		status = msft_remove_monitor(hdev, monitor);
1941 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1942 			   handle, status);
1943 		break;
1944 	}
1945 
1946 	/* In case no matching handle registered, just free the monitor */
1947 	if (status == -ENOENT)
1948 		goto free_monitor;
1949 
1950 	return status;
1951 
1952 free_monitor:
1953 	if (status == -ENOENT)
1954 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1955 			    monitor->handle);
1956 	hci_free_adv_monitor(hdev, monitor);
1957 
1958 	return status;
1959 }
1960 
1961 /* This function requires the caller holds hci_req_sync_lock */
1962 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1963 {
1964 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1965 
1966 	if (!monitor)
1967 		return -EINVAL;
1968 
1969 	return hci_remove_adv_monitor(hdev, monitor);
1970 }
1971 
1972 /* This function requires the caller holds hci_req_sync_lock */
1973 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1974 {
1975 	struct adv_monitor *monitor;
1976 	int idr_next_id = 0;
1977 	int status = 0;
1978 
1979 	while (1) {
1980 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1981 		if (!monitor)
1982 			break;
1983 
1984 		status = hci_remove_adv_monitor(hdev, monitor);
1985 		if (status)
1986 			return status;
1987 
1988 		idr_next_id++;
1989 	}
1990 
1991 	return status;
1992 }
1993 
1994 /* This function requires the caller holds hdev->lock */
1995 bool hci_is_adv_monitoring(struct hci_dev *hdev)
1996 {
1997 	return !idr_is_empty(&hdev->adv_monitors_idr);
1998 }
1999 
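/* Report which advertising monitor offload mechanism the controller
 * supports: the Microsoft extension if available, otherwise none.
 */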
2000 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2001 {
2002 	if (msft_monitor_supported(hdev))
2003 		return HCI_ADV_MONITOR_EXT_MSFT;
2004 
2005 	return HCI_ADV_MONITOR_EXT_NONE;
2006 }
2007 
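/* Look up an entry in a bdaddr_list by device address and address type. */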
2008 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2009 					 bdaddr_t *bdaddr, u8 type)
2010 {
2011 	struct bdaddr_list *b;
2012 
2013 	list_for_each_entry(b, bdaddr_list, list) {
2014 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2015 			return b;
2016 	}
2017 
2018 	return NULL;
2019 }
2020 
2021 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2022 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2023 				u8 type)
2024 {
2025 	struct bdaddr_list_with_irk *b;
2026 
2027 	list_for_each_entry(b, bdaddr_list, list) {
2028 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2029 			return b;
2030 	}
2031 
2032 	return NULL;
2033 }
2034 
2035 struct bdaddr_list_with_flags *
2036 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2037 				  bdaddr_t *bdaddr, u8 type)
2038 {
2039 	struct bdaddr_list_with_flags *b;
2040 
2041 	list_for_each_entry(b, bdaddr_list, list) {
2042 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2043 			return b;
2044 	}
2045 
2046 	return NULL;
2047 }
2048 
2049 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2050 {
2051 	struct bdaddr_list *b, *n;
2052 
2053 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2054 		list_del(&b->list);
2055 		kfree(b);
2056 	}
2057 }
2058 
2059 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2060 {
2061 	struct bdaddr_list *entry;
2062 
2063 	if (!bacmp(bdaddr, BDADDR_ANY))
2064 		return -EBADF;
2065 
2066 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2067 		return -EEXIST;
2068 
2069 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2070 	if (!entry)
2071 		return -ENOMEM;
2072 
2073 	bacpy(&entry->bdaddr, bdaddr);
2074 	entry->bdaddr_type = type;
2075 
2076 	list_add(&entry->list, list);
2077 
2078 	return 0;
2079 }
2080 
2081 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2082 					u8 type, u8 *peer_irk, u8 *local_irk)
2083 {
2084 	struct bdaddr_list_with_irk *entry;
2085 
2086 	if (!bacmp(bdaddr, BDADDR_ANY))
2087 		return -EBADF;
2088 
2089 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2090 		return -EEXIST;
2091 
2092 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2093 	if (!entry)
2094 		return -ENOMEM;
2095 
2096 	bacpy(&entry->bdaddr, bdaddr);
2097 	entry->bdaddr_type = type;
2098 
2099 	if (peer_irk)
2100 		memcpy(entry->peer_irk, peer_irk, 16);
2101 
2102 	if (local_irk)
2103 		memcpy(entry->local_irk, local_irk, 16);
2104 
2105 	list_add(&entry->list, list);
2106 
2107 	return 0;
2108 }
2109 
2110 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2111 				   u8 type, u32 flags)
2112 {
2113 	struct bdaddr_list_with_flags *entry;
2114 
2115 	if (!bacmp(bdaddr, BDADDR_ANY))
2116 		return -EBADF;
2117 
2118 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2119 		return -EEXIST;
2120 
2121 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2122 	if (!entry)
2123 		return -ENOMEM;
2124 
2125 	bacpy(&entry->bdaddr, bdaddr);
2126 	entry->bdaddr_type = type;
2127 	entry->flags = flags;
2128 
2129 	list_add(&entry->list, list);
2130 
2131 	return 0;
2132 }
2133 
2134 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2135 {
2136 	struct bdaddr_list *entry;
2137 
2138 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2139 		hci_bdaddr_list_clear(list);
2140 		return 0;
2141 	}
2142 
2143 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2144 	if (!entry)
2145 		return -ENOENT;
2146 
2147 	list_del(&entry->list);
2148 	kfree(entry);
2149 
2150 	return 0;
2151 }
2152 
2153 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2154 							u8 type)
2155 {
2156 	struct bdaddr_list_with_irk *entry;
2157 
2158 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2159 		hci_bdaddr_list_clear(list);
2160 		return 0;
2161 	}
2162 
2163 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2164 	if (!entry)
2165 		return -ENOENT;
2166 
2167 	list_del(&entry->list);
2168 	kfree(entry);
2169 
2170 	return 0;
2171 }
2172 
2173 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2174 				   u8 type)
2175 {
2176 	struct bdaddr_list_with_flags *entry;
2177 
2178 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2179 		hci_bdaddr_list_clear(list);
2180 		return 0;
2181 	}
2182 
2183 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2184 	if (!entry)
2185 		return -ENOENT;
2186 
2187 	list_del(&entry->list);
2188 	kfree(entry);
2189 
2190 	return 0;
2191 }
2192 
2193 /* This function requires the caller holds hdev->lock */
2194 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2195 					       bdaddr_t *addr, u8 addr_type)
2196 {
2197 	struct hci_conn_params *params;
2198 
2199 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2200 		if (bacmp(&params->addr, addr) == 0 &&
2201 		    params->addr_type == addr_type) {
2202 			return params;
2203 		}
2204 	}
2205 
2206 	return NULL;
2207 }
2208 
2209 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2210 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2211 						  bdaddr_t *addr, u8 addr_type)
2212 {
2213 	struct hci_conn_params *param;
2214 
2215 	rcu_read_lock();
2216 
2217 	list_for_each_entry_rcu(param, list, action) {
2218 		if (bacmp(&param->addr, addr) == 0 &&
2219 		    param->addr_type == addr_type) {
2220 			rcu_read_unlock();
2221 			return param;
2222 		}
2223 	}
2224 
2225 	rcu_read_unlock();
2226 
2227 	return NULL;
2228 }
2229 
2230 /* This function requires the caller holds hdev->lock */
2231 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2232 {
2233 	if (list_empty(&param->action))
2234 		return;
2235 
2236 	list_del_rcu(&param->action);
2237 	synchronize_rcu();
2238 	INIT_LIST_HEAD(&param->action);
2239 }
2240 
2241 /* This function requires the caller holds hdev->lock */
2242 void hci_pend_le_list_add(struct hci_conn_params *param,
2243 			  struct list_head *list)
2244 {
2245 	list_add_rcu(&param->action, list);
2246 }
2247 
2248 /* This function requires the caller holds hdev->lock */
2249 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2250 					    bdaddr_t *addr, u8 addr_type)
2251 {
2252 	struct hci_conn_params *params;
2253 
2254 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2255 	if (params)
2256 		return params;
2257 
2258 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2259 	if (!params) {
2260 		bt_dev_err(hdev, "out of memory");
2261 		return NULL;
2262 	}
2263 
2264 	bacpy(&params->addr, addr);
2265 	params->addr_type = addr_type;
2266 
2267 	list_add(&params->list, &hdev->le_conn_params);
2268 	INIT_LIST_HEAD(&params->action);
2269 
2270 	params->conn_min_interval = hdev->le_conn_min_interval;
2271 	params->conn_max_interval = hdev->le_conn_max_interval;
2272 	params->conn_latency = hdev->le_conn_latency;
2273 	params->supervision_timeout = hdev->le_supv_timeout;
2274 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2275 
2276 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2277 
2278 	return params;
2279 }
2280 
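/* Unlink the connection parameters from any pending-action list, drop the
 * connection reference they may hold and free them.
 */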
2281 void hci_conn_params_free(struct hci_conn_params *params)
2282 {
2283 	hci_pend_le_list_del_init(params);
2284 
2285 	if (params->conn) {
2286 		hci_conn_drop(params->conn);
2287 		hci_conn_put(params->conn);
2288 	}
2289 
2290 	list_del(&params->list);
2291 	kfree(params);
2292 }
2293 
2294 /* This function requires the caller holds hdev->lock */
2295 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2296 {
2297 	struct hci_conn_params *params;
2298 
2299 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2300 	if (!params)
2301 		return;
2302 
2303 	hci_conn_params_free(params);
2304 
2305 	hci_update_passive_scan(hdev);
2306 
2307 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2308 }
2309 
2310 /* This function requires the caller holds hdev->lock */
2311 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2312 {
2313 	struct hci_conn_params *params, *tmp;
2314 
2315 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2316 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2317 			continue;
2318 
2319 		/* If trying to establish a one-time connection to a disabled
2320 		 * device, keep the params but mark them for explicit use only.
2321 		 */
2322 		if (params->explicit_connect) {
2323 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2324 			continue;
2325 		}
2326 
2327 		hci_conn_params_free(params);
2328 	}
2329 
2330 	BT_DBG("All LE disabled connection parameters were removed");
2331 }
2332 
2333 /* This function requires the caller holds hdev->lock */
2334 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2335 {
2336 	struct hci_conn_params *params, *tmp;
2337 
2338 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2339 		hci_conn_params_free(params);
2340 
2341 	BT_DBG("All LE connection parameters were removed");
2342 }
2343 
2344 /* Copy the Identity Address of the controller.
2345  *
2346  * If the controller has a public BD_ADDR, then by default use that one.
2347  * If this is a LE only controller without a public address, default to
2348  * the static random address.
2349  *
2350  * For debugging purposes it is possible to force controllers with a
2351  * public address to use the static random address instead.
2352  *
2353  * In case BR/EDR has been disabled on a dual-mode controller and
2354  * userspace has configured a static address, then that address
2355  * becomes the identity address instead of the public BR/EDR address.
2356  */
2357 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2358 			       u8 *bdaddr_type)
2359 {
2360 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2361 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2362 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2363 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2364 		bacpy(bdaddr, &hdev->static_addr);
2365 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2366 	} else {
2367 		bacpy(bdaddr, &hdev->bdaddr);
2368 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2369 	}
2370 }
2371 
2372 static void hci_clear_wake_reason(struct hci_dev *hdev)
2373 {
2374 	hci_dev_lock(hdev);
2375 
2376 	hdev->wake_reason = 0;
2377 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2378 	hdev->wake_addr_type = 0;
2379 
2380 	hci_dev_unlock(hdev);
2381 }
2382 
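/* PM notifier callback: forward system suspend/resume transitions to the
 * controller unless userspace has exclusive control over it.
 */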
2383 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2384 				void *data)
2385 {
2386 	struct hci_dev *hdev =
2387 		container_of(nb, struct hci_dev, suspend_notifier);
2388 	int ret = 0;
2389 
2390 	/* Userspace has full control of this device. Do nothing. */
2391 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2392 		return NOTIFY_DONE;
2393 
2394 	/* To avoid a potential race with hci_unregister_dev. */
2395 	hci_dev_hold(hdev);
2396 
2397 	switch (action) {
2398 	case PM_HIBERNATION_PREPARE:
2399 	case PM_SUSPEND_PREPARE:
2400 		ret = hci_suspend_dev(hdev);
2401 		break;
2402 	case PM_POST_HIBERNATION:
2403 	case PM_POST_SUSPEND:
2404 		ret = hci_resume_dev(hdev);
2405 		break;
2406 	}
2407 
2408 	if (ret)
2409 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2410 			   action, ret);
2411 
2412 	hci_dev_put(hdev);
2413 	return NOTIFY_DONE;
2414 }
2415 
2416 /* Alloc HCI device */
2417 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2418 {
2419 	struct hci_dev *hdev;
2420 	unsigned int alloc_size;
2421 
2422 	alloc_size = sizeof(*hdev);
2423 	if (sizeof_priv) {
2424 		/* Fixme: May need ALIGN-ment? */
2425 		alloc_size += sizeof_priv;
2426 	}
2427 
2428 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2429 	if (!hdev)
2430 		return NULL;
2431 
2432 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2433 	hdev->esco_type = (ESCO_HV1);
2434 	hdev->link_mode = (HCI_LM_ACCEPT);
2435 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2436 	hdev->io_capability = 0x03;	/* No Input No Output */
2437 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2438 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2439 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2440 	hdev->adv_instance_cnt = 0;
2441 	hdev->cur_adv_instance = 0x00;
2442 	hdev->adv_instance_timeout = 0;
2443 
2444 	hdev->advmon_allowlist_duration = 300;
2445 	hdev->advmon_no_filter_duration = 500;
2446 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2447 
2448 	hdev->sniff_max_interval = 800;
2449 	hdev->sniff_min_interval = 80;
2450 
2451 	hdev->le_adv_channel_map = 0x07;
2452 	hdev->le_adv_min_interval = 0x0800;
2453 	hdev->le_adv_max_interval = 0x0800;
2454 	hdev->le_scan_interval = 0x0060;
2455 	hdev->le_scan_window = 0x0030;
2456 	hdev->le_scan_int_suspend = 0x0400;
2457 	hdev->le_scan_window_suspend = 0x0012;
2458 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2459 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2460 	hdev->le_scan_int_adv_monitor = 0x0060;
2461 	hdev->le_scan_window_adv_monitor = 0x0030;
2462 	hdev->le_scan_int_connect = 0x0060;
2463 	hdev->le_scan_window_connect = 0x0060;
2464 	hdev->le_conn_min_interval = 0x0018;
2465 	hdev->le_conn_max_interval = 0x0028;
2466 	hdev->le_conn_latency = 0x0000;
2467 	hdev->le_supv_timeout = 0x002a;
2468 	hdev->le_def_tx_len = 0x001b;
2469 	hdev->le_def_tx_time = 0x0148;
2470 	hdev->le_max_tx_len = 0x001b;
2471 	hdev->le_max_tx_time = 0x0148;
2472 	hdev->le_max_rx_len = 0x001b;
2473 	hdev->le_max_rx_time = 0x0148;
2474 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2475 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2476 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2477 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2478 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2479 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2480 	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2481 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2482 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2483 
2484 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2485 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2486 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2487 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2488 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2489 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2490 
2491 	/* default 1.28 sec page scan */
2492 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2493 	hdev->def_page_scan_int = 0x0800;
2494 	hdev->def_page_scan_window = 0x0012;
2495 
2496 	mutex_init(&hdev->lock);
2497 	mutex_init(&hdev->req_lock);
2498 
2499 	ida_init(&hdev->unset_handle_ida);
2500 
2501 	INIT_LIST_HEAD(&hdev->mesh_pending);
2502 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2503 	INIT_LIST_HEAD(&hdev->reject_list);
2504 	INIT_LIST_HEAD(&hdev->accept_list);
2505 	INIT_LIST_HEAD(&hdev->uuids);
2506 	INIT_LIST_HEAD(&hdev->link_keys);
2507 	INIT_LIST_HEAD(&hdev->long_term_keys);
2508 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2509 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2510 	INIT_LIST_HEAD(&hdev->le_accept_list);
2511 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2512 	INIT_LIST_HEAD(&hdev->le_conn_params);
2513 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2514 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2515 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2516 	INIT_LIST_HEAD(&hdev->adv_instances);
2517 	INIT_LIST_HEAD(&hdev->blocked_keys);
2518 	INIT_LIST_HEAD(&hdev->monitored_devices);
2519 
2520 	INIT_LIST_HEAD(&hdev->local_codecs);
2521 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2522 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2523 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2524 	INIT_WORK(&hdev->power_on, hci_power_on);
2525 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2526 
2527 	hci_cmd_sync_init(hdev);
2528 
2529 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2530 
2531 	skb_queue_head_init(&hdev->rx_q);
2532 	skb_queue_head_init(&hdev->cmd_q);
2533 	skb_queue_head_init(&hdev->raw_q);
2534 
2535 	init_waitqueue_head(&hdev->req_wait_q);
2536 
2537 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2538 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2539 
2540 	hci_devcd_setup(hdev);
2541 	hci_request_setup(hdev);
2542 
2543 	hci_init_sysfs(hdev);
2544 	discovery_init(hdev);
2545 
2546 	return hdev;
2547 }
2548 EXPORT_SYMBOL(hci_alloc_dev_priv);
2549 
2550 /* Free HCI device */
2551 void hci_free_dev(struct hci_dev *hdev)
2552 {
2553 	/* Will be freed via the device release callback. */
2554 	put_device(&hdev->dev);
2555 }
2556 EXPORT_SYMBOL(hci_free_dev);
2557 
2558 /* Register HCI device */
2559 int hci_register_dev(struct hci_dev *hdev)
2560 {
2561 	int id, error;
2562 
2563 	if (!hdev->open || !hdev->close || !hdev->send)
2564 		return -EINVAL;
2565 
2566 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2567 	if (id < 0)
2568 		return id;
2569 
2570 	error = dev_set_name(&hdev->dev, "hci%u", id);
2571 	if (error)
2572 		return error;
2573 
2574 	hdev->name = dev_name(&hdev->dev);
2575 	hdev->id = id;
2576 
2577 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2578 
2579 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2580 	if (!hdev->workqueue) {
2581 		error = -ENOMEM;
2582 		goto err;
2583 	}
2584 
2585 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2586 						      hdev->name);
2587 	if (!hdev->req_workqueue) {
2588 		destroy_workqueue(hdev->workqueue);
2589 		error = -ENOMEM;
2590 		goto err;
2591 	}
2592 
2593 	if (!IS_ERR_OR_NULL(bt_debugfs))
2594 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2595 
2596 	error = device_add(&hdev->dev);
2597 	if (error < 0)
2598 		goto err_wqueue;
2599 
2600 	hci_leds_init(hdev);
2601 
2602 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2603 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2604 				    hdev);
2605 	if (hdev->rfkill) {
2606 		if (rfkill_register(hdev->rfkill) < 0) {
2607 			rfkill_destroy(hdev->rfkill);
2608 			hdev->rfkill = NULL;
2609 		}
2610 	}
2611 
2612 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2613 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2614 
2615 	hci_dev_set_flag(hdev, HCI_SETUP);
2616 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2617 
2618 	/* Assume BR/EDR support until proven otherwise (such as
2619 	 * through reading supported features during init).
2620 	 */
2621 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2622 
2623 	write_lock(&hci_dev_list_lock);
2624 	list_add(&hdev->list, &hci_dev_list);
2625 	write_unlock(&hci_dev_list_lock);
2626 
2627 	/* Devices that are marked for raw-only usage are unconfigured
2628 	 * and should not be included in normal operation.
2629 	 */
2630 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2631 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2632 
2633 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2634 	 * callback.
2635 	 */
2636 	if (hdev->wakeup)
2637 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2638 
2639 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2640 	hci_dev_hold(hdev);
2641 
2642 	error = hci_register_suspend_notifier(hdev);
2643 	if (error)
2644 		BT_WARN("register suspend notifier failed error:%d\n", error);
2645 
2646 	queue_work(hdev->req_workqueue, &hdev->power_on);
2647 
2648 	idr_init(&hdev->adv_monitors_idr);
2649 	msft_register(hdev);
2650 
2651 	return id;
2652 
2653 err_wqueue:
2654 	debugfs_remove_recursive(hdev->debugfs);
2655 	destroy_workqueue(hdev->workqueue);
2656 	destroy_workqueue(hdev->req_workqueue);
2657 err:
2658 	ida_free(&hci_index_ida, hdev->id);
2659 
2660 	return error;
2661 }
2662 EXPORT_SYMBOL(hci_register_dev);
2663 
2664 /* Unregister HCI device */
2665 void hci_unregister_dev(struct hci_dev *hdev)
2666 {
2667 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2668 
2669 	mutex_lock(&hdev->unregister_lock);
2670 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2671 	mutex_unlock(&hdev->unregister_lock);
2672 
2673 	write_lock(&hci_dev_list_lock);
2674 	list_del(&hdev->list);
2675 	write_unlock(&hci_dev_list_lock);
2676 
2677 	cancel_work_sync(&hdev->rx_work);
2678 	cancel_work_sync(&hdev->cmd_work);
2679 	cancel_work_sync(&hdev->tx_work);
2680 	cancel_work_sync(&hdev->power_on);
2681 	cancel_work_sync(&hdev->error_reset);
2682 
2683 	hci_cmd_sync_clear(hdev);
2684 
2685 	hci_unregister_suspend_notifier(hdev);
2686 
2687 	hci_dev_do_close(hdev);
2688 
2689 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2690 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2691 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2692 		hci_dev_lock(hdev);
2693 		mgmt_index_removed(hdev);
2694 		hci_dev_unlock(hdev);
2695 	}
2696 
2697 	/* mgmt_index_removed should take care of emptying the
2698 	 * pending list */
2699 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2700 
2701 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2702 
2703 	if (hdev->rfkill) {
2704 		rfkill_unregister(hdev->rfkill);
2705 		rfkill_destroy(hdev->rfkill);
2706 	}
2707 
2708 	device_del(&hdev->dev);
2709 	/* Actual cleanup is deferred until hci_release_dev(). */
2710 	hci_dev_put(hdev);
2711 }
2712 EXPORT_SYMBOL(hci_unregister_dev);
2713 
2714 /* Release HCI device */
2715 void hci_release_dev(struct hci_dev *hdev)
2716 {
2717 	debugfs_remove_recursive(hdev->debugfs);
2718 	kfree_const(hdev->hw_info);
2719 	kfree_const(hdev->fw_info);
2720 
2721 	destroy_workqueue(hdev->workqueue);
2722 	destroy_workqueue(hdev->req_workqueue);
2723 
2724 	hci_dev_lock(hdev);
2725 	hci_bdaddr_list_clear(&hdev->reject_list);
2726 	hci_bdaddr_list_clear(&hdev->accept_list);
2727 	hci_uuids_clear(hdev);
2728 	hci_link_keys_clear(hdev);
2729 	hci_smp_ltks_clear(hdev);
2730 	hci_smp_irks_clear(hdev);
2731 	hci_remote_oob_data_clear(hdev);
2732 	hci_adv_instances_clear(hdev);
2733 	hci_adv_monitors_clear(hdev);
2734 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2735 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2736 	hci_conn_params_clear_all(hdev);
2737 	hci_discovery_filter_clear(hdev);
2738 	hci_blocked_keys_clear(hdev);
2739 	hci_codec_list_clear(&hdev->local_codecs);
2740 	msft_release(hdev);
2741 	hci_dev_unlock(hdev);
2742 
2743 	ida_destroy(&hdev->unset_handle_ida);
2744 	ida_free(&hci_index_ida, hdev->id);
2745 	kfree_skb(hdev->sent_cmd);
2746 	kfree_skb(hdev->req_skb);
2747 	kfree_skb(hdev->recv_event);
2748 	kfree(hdev);
2749 }
2750 EXPORT_SYMBOL(hci_release_dev);
2751 
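/* Register the PM notifier unless the driver opted out via
 * HCI_QUIRK_NO_SUSPEND_NOTIFIER or it is already registered.
 */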
2752 int hci_register_suspend_notifier(struct hci_dev *hdev)
2753 {
2754 	int ret = 0;
2755 
2756 	if (!hdev->suspend_notifier.notifier_call &&
2757 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2758 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2759 		ret = register_pm_notifier(&hdev->suspend_notifier);
2760 	}
2761 
2762 	return ret;
2763 }
2764 
2765 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2766 {
2767 	int ret = 0;
2768 
2769 	if (hdev->suspend_notifier.notifier_call) {
2770 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2771 		if (!ret)
2772 			hdev->suspend_notifier.notifier_call = NULL;
2773 	}
2774 
2775 	return ret;
2776 }
2777 
2778 /* Cancel ongoing command synchronously:
2779  *
2780  * - Cancel command timer
2781  * - Reset command counter
2782  * - Cancel command request
2783  */
2784 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2785 {
2786 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2787 
2788 	cancel_delayed_work_sync(&hdev->cmd_timer);
2789 	cancel_delayed_work_sync(&hdev->ncmd_timer);
2790 	atomic_set(&hdev->cmd_cnt, 1);
2791 
2792 	hci_cmd_sync_cancel_sync(hdev, err);
2793 }
2794 
2795 /* Suspend HCI device */
2796 int hci_suspend_dev(struct hci_dev *hdev)
2797 {
2798 	int ret;
2799 
2800 	bt_dev_dbg(hdev, "");
2801 
2802 	/* Suspend should only act when powered. */
2803 	if (!hdev_is_powered(hdev) ||
2804 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2805 		return 0;
2806 
2807 	/* If powering down, don't attempt to suspend */
2808 	if (mgmt_powering_down(hdev))
2809 		return 0;
2810 
2811 	/* Cancel potentially blocking sync operation before suspend */
2812 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2813 
2814 	hci_req_sync_lock(hdev);
2815 	ret = hci_suspend_sync(hdev);
2816 	hci_req_sync_unlock(hdev);
2817 
2818 	hci_clear_wake_reason(hdev);
2819 	mgmt_suspending(hdev, hdev->suspend_state);
2820 
2821 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2822 	return ret;
2823 }
2824 EXPORT_SYMBOL(hci_suspend_dev);
2825 
2826 /* Resume HCI device */
2827 int hci_resume_dev(struct hci_dev *hdev)
2828 {
2829 	int ret;
2830 
2831 	bt_dev_dbg(hdev, "");
2832 
2833 	/* Resume should only act when powered. */
2834 	if (!hdev_is_powered(hdev) ||
2835 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2836 		return 0;
2837 
2838 	/* If powering down, don't attempt to resume */
2839 	if (mgmt_powering_down(hdev))
2840 		return 0;
2841 
2842 	hci_req_sync_lock(hdev);
2843 	ret = hci_resume_sync(hdev);
2844 	hci_req_sync_unlock(hdev);
2845 
2846 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2847 		      hdev->wake_addr_type);
2848 
2849 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2850 	return ret;
2851 }
2852 EXPORT_SYMBOL(hci_resume_dev);
2853 
2854 /* Reset HCI device */
2855 int hci_reset_dev(struct hci_dev *hdev)
2856 {
2857 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2858 	struct sk_buff *skb;
2859 
2860 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2861 	if (!skb)
2862 		return -ENOMEM;
2863 
2864 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2865 	skb_put_data(skb, hw_err, 3);
2866 
2867 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2868 
2869 	/* Send Hardware Error to upper stack */
2870 	return hci_recv_frame(hdev, skb);
2871 }
2872 EXPORT_SYMBOL(hci_reset_dev);
2873 
2874 /* Receive frame from HCI drivers */
2875 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2876 {
2877 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2878 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2879 		kfree_skb(skb);
2880 		return -ENXIO;
2881 	}
2882 
2883 	switch (hci_skb_pkt_type(skb)) {
2884 	case HCI_EVENT_PKT:
2885 		break;
2886 	case HCI_ACLDATA_PKT:
2887 		/* Detect if ISO packet has been sent as ACL */
2888 		if (hci_conn_num(hdev, ISO_LINK)) {
2889 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2890 			__u8 type;
2891 
2892 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2893 			if (type == ISO_LINK)
2894 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2895 		}
2896 		break;
2897 	case HCI_SCODATA_PKT:
2898 		break;
2899 	case HCI_ISODATA_PKT:
2900 		break;
2901 	default:
2902 		kfree_skb(skb);
2903 		return -EINVAL;
2904 	}
2905 
2906 	/* Incoming skb */
2907 	bt_cb(skb)->incoming = 1;
2908 
2909 	/* Time stamp */
2910 	__net_timestamp(skb);
2911 
2912 	skb_queue_tail(&hdev->rx_q, skb);
2913 	queue_work(hdev->workqueue, &hdev->rx_work);
2914 
2915 	return 0;
2916 }
2917 EXPORT_SYMBOL(hci_recv_frame);
2918 
2919 /* Receive diagnostic message from HCI drivers */
2920 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2921 {
2922 	/* Mark as diagnostic packet */
2923 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2924 
2925 	/* Time stamp */
2926 	__net_timestamp(skb);
2927 
2928 	skb_queue_tail(&hdev->rx_q, skb);
2929 	queue_work(hdev->workqueue, &hdev->rx_work);
2930 
2931 	return 0;
2932 }
2933 EXPORT_SYMBOL(hci_recv_diag);
2934 
2935 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2936 {
2937 	va_list vargs;
2938 
2939 	va_start(vargs, fmt);
2940 	kfree_const(hdev->hw_info);
2941 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2942 	va_end(vargs);
2943 }
2944 EXPORT_SYMBOL(hci_set_hw_info);
2945 
2946 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2947 {
2948 	va_list vargs;
2949 
2950 	va_start(vargs, fmt);
2951 	kfree_const(hdev->fw_info);
2952 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2953 	va_end(vargs);
2954 }
2955 EXPORT_SYMBOL(hci_set_fw_info);
2956 
2957 /* ---- Interface to upper protocols ---- */
2958 
2959 int hci_register_cb(struct hci_cb *cb)
2960 {
2961 	BT_DBG("%p name %s", cb, cb->name);
2962 
2963 	mutex_lock(&hci_cb_list_lock);
2964 	list_add_tail(&cb->list, &hci_cb_list);
2965 	mutex_unlock(&hci_cb_list_lock);
2966 
2967 	return 0;
2968 }
2969 EXPORT_SYMBOL(hci_register_cb);
2970 
2971 int hci_unregister_cb(struct hci_cb *cb)
2972 {
2973 	BT_DBG("%p name %s", cb, cb->name);
2974 
2975 	mutex_lock(&hci_cb_list_lock);
2976 	list_del(&cb->list);
2977 	mutex_unlock(&hci_cb_list_lock);
2978 
2979 	return 0;
2980 }
2981 EXPORT_SYMBOL(hci_unregister_cb);
2982 
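/* Timestamp the frame, pass copies to the monitor socket (and, in
 * promiscuous mode, to raw sockets) and hand it to the driver.
 */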
2983 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2984 {
2985 	int err;
2986 
2987 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2988 	       skb->len);
2989 
2990 	/* Time stamp */
2991 	__net_timestamp(skb);
2992 
2993 	/* Send copy to monitor */
2994 	hci_send_to_monitor(hdev, skb);
2995 
2996 	if (atomic_read(&hdev->promisc)) {
2997 		/* Send copy to the sockets */
2998 		hci_send_to_sock(hdev, skb);
2999 	}
3000 
3001 	/* Get rid of skb owner, prior to sending to the driver. */
3002 	skb_orphan(skb);
3003 
3004 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3005 		kfree_skb(skb);
3006 		return -EINVAL;
3007 	}
3008 
3009 	err = hdev->send(hdev, skb);
3010 	if (err < 0) {
3011 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3012 		kfree_skb(skb);
3013 		return err;
3014 	}
3015 
3016 	return 0;
3017 }
3018 
3019 /* Send HCI command */
3020 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3021 		 const void *param)
3022 {
3023 	struct sk_buff *skb;
3024 
3025 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3026 
3027 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3028 	if (!skb) {
3029 		bt_dev_err(hdev, "no memory for command");
3030 		return -ENOMEM;
3031 	}
3032 
3033 	/* Stand-alone HCI commands must be flagged as
3034 	 * single-command requests.
3035 	 */
3036 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3037 
3038 	skb_queue_tail(&hdev->cmd_q, skb);
3039 	queue_work(hdev->workqueue, &hdev->cmd_work);
3040 
3041 	return 0;
3042 }
3043 
3044 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3045 		   const void *param)
3046 {
3047 	struct sk_buff *skb;
3048 
3049 	if (hci_opcode_ogf(opcode) != 0x3f) {
3050 		/* A controller receiving a command shall respond with either
3051 		 * a Command Status Event or a Command Complete Event.
3052 		 * Therefore, all standard HCI commands must be sent via the
3053 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3054 		 * Some vendors do not comply with this rule for vendor-specific
3055 		 * commands and do not return any event. We want to support
3056 		 * unresponded commands for such cases only.
3057 		 */
3058 		bt_dev_err(hdev, "unresponded command not supported");
3059 		return -EINVAL;
3060 	}
3061 
3062 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3063 	if (!skb) {
3064 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3065 			   opcode);
3066 		return -ENOMEM;
3067 	}
3068 
3069 	hci_send_frame(hdev, skb);
3070 
3071 	return 0;
3072 }
3073 EXPORT_SYMBOL(__hci_cmd_send);
3074 
3075 /* Get data from the previously sent command */
3076 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3077 {
3078 	struct hci_command_hdr *hdr;
3079 
3080 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3081 		return NULL;
3082 
3083 	hdr = (void *)skb->data;
3084 
3085 	if (hdr->opcode != cpu_to_le16(opcode))
3086 		return NULL;
3087 
3088 	return skb->data + HCI_COMMAND_HDR_SIZE;
3089 }
3090 
3091 /* Get data from the previously sent command */
3092 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3093 {
3094 	void *data;
3095 
3096 	/* Check if opcode matches last sent command */
3097 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3098 	if (!data)
3099 		/* Check if opcode matches last request */
3100 		data = hci_cmd_data(hdev->req_skb, opcode);
3101 
3102 	return data;
3103 }
3104 
3105 /* Get data from last received event */
3106 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3107 {
3108 	struct hci_event_hdr *hdr;
3109 	int offset;
3110 
3111 	if (!hdev->recv_event)
3112 		return NULL;
3113 
3114 	hdr = (void *)hdev->recv_event->data;
3115 	offset = sizeof(*hdr);
3116 
3117 	if (hdr->evt != event) {
3118 		/* In case of an LE meta event, check whether the subevent matches */
3119 		if (hdr->evt == HCI_EV_LE_META) {
3120 			struct hci_ev_le_meta *ev;
3121 
3122 			ev = (void *)hdev->recv_event->data + offset;
3123 			offset += sizeof(*ev);
3124 			if (ev->subevent == event)
3125 				goto found;
3126 		}
3127 		return NULL;
3128 	}
3129 
3130 found:
3131 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3132 
3133 	return hdev->recv_event->data + offset;
3134 }
3135 
3136 /* Send ACL data */
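/* Prepend the ACL data header: packed handle/flags plus payload length. */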
3137 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3138 {
3139 	struct hci_acl_hdr *hdr;
3140 	int len = skb->len;
3141 
3142 	skb_push(skb, HCI_ACL_HDR_SIZE);
3143 	skb_reset_transport_header(skb);
3144 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3145 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3146 	hdr->dlen   = cpu_to_le16(len);
3147 }
3148 
3149 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3150 			  struct sk_buff *skb, __u16 flags)
3151 {
3152 	struct hci_conn *conn = chan->conn;
3153 	struct hci_dev *hdev = conn->hdev;
3154 	struct sk_buff *list;
3155 
3156 	skb->len = skb_headlen(skb);
3157 	skb->data_len = 0;
3158 
3159 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3160 
3161 	hci_add_acl_hdr(skb, conn->handle, flags);
3162 
3163 	list = skb_shinfo(skb)->frag_list;
3164 	if (!list) {
3165 		/* Non fragmented */
3166 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3167 
3168 		skb_queue_tail(queue, skb);
3169 	} else {
3170 		/* Fragmented */
3171 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3172 
3173 		skb_shinfo(skb)->frag_list = NULL;
3174 
3175 		/* Queue all fragments atomically. We need to use spin_lock_bh
3176 		 * here because of 6LoWPAN links, as there this function is
3177 		 * called from softirq and using normal spin lock could cause
3178 		 * deadlocks.
3179 		 */
3180 		spin_lock_bh(&queue->lock);
3181 
3182 		__skb_queue_tail(queue, skb);
3183 
3184 		flags &= ~ACL_START;
3185 		flags |= ACL_CONT;
3186 		do {
3187 			skb = list; list = list->next;
3188 
3189 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3190 			hci_add_acl_hdr(skb, conn->handle, flags);
3191 
3192 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3193 
3194 			__skb_queue_tail(queue, skb);
3195 		} while (list);
3196 
3197 		spin_unlock_bh(&queue->lock);
3198 	}
3199 }
3200 
3201 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3202 {
3203 	struct hci_dev *hdev = chan->conn->hdev;
3204 
3205 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3206 
3207 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3208 
3209 	queue_work(hdev->workqueue, &hdev->tx_work);
3210 }
3211 
3212 /* Send SCO data */
3213 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3214 {
3215 	struct hci_dev *hdev = conn->hdev;
3216 	struct hci_sco_hdr hdr;
3217 
3218 	BT_DBG("%s len %d", hdev->name, skb->len);
3219 
3220 	hdr.handle = cpu_to_le16(conn->handle);
3221 	hdr.dlen   = skb->len;
3222 
3223 	skb_push(skb, HCI_SCO_HDR_SIZE);
3224 	skb_reset_transport_header(skb);
3225 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3226 
3227 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3228 
3229 	skb_queue_tail(&conn->data_q, skb);
3230 	queue_work(hdev->workqueue, &hdev->tx_work);
3231 }
3232 
3233 /* Send ISO data */
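/* Prepend the ISO data header: packed handle/flags plus payload length. */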
3234 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3235 {
3236 	struct hci_iso_hdr *hdr;
3237 	int len = skb->len;
3238 
3239 	skb_push(skb, HCI_ISO_HDR_SIZE);
3240 	skb_reset_transport_header(skb);
3241 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3242 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3243 	hdr->dlen   = cpu_to_le16(len);
3244 }
3245 
3246 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3247 			  struct sk_buff *skb)
3248 {
3249 	struct hci_dev *hdev = conn->hdev;
3250 	struct sk_buff *list;
3251 	__u16 flags;
3252 
3253 	skb->len = skb_headlen(skb);
3254 	skb->data_len = 0;
3255 
3256 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3257 
3258 	list = skb_shinfo(skb)->frag_list;
3259 
3260 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3261 	hci_add_iso_hdr(skb, conn->handle, flags);
3262 
3263 	if (!list) {
3264 		/* Non fragmented */
3265 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3266 
3267 		skb_queue_tail(queue, skb);
3268 	} else {
3269 		/* Fragmented */
3270 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3271 
3272 		skb_shinfo(skb)->frag_list = NULL;
3273 
3274 		__skb_queue_tail(queue, skb);
3275 
3276 		do {
3277 			skb = list; list = list->next;
3278 
3279 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3280 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3281 						   0x00);
3282 			hci_add_iso_hdr(skb, conn->handle, flags);
3283 
3284 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3285 
3286 			__skb_queue_tail(queue, skb);
3287 		} while (list);
3288 	}
3289 }
3290 
3291 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3292 {
3293 	struct hci_dev *hdev = conn->hdev;
3294 
3295 	BT_DBG("%s len %d", hdev->name, skb->len);
3296 
3297 	hci_queue_iso(conn, &conn->data_q, skb);
3298 
3299 	queue_work(hdev->workqueue, &hdev->tx_work);
3300 }
3301 
3302 /* ---- HCI TX task (outgoing data) ---- */
3303 
3304 /* HCI Connection scheduler */
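/* Compute how many packets a connection may send in this round: the free
 * controller buffer count for its link type divided by the number of ready
 * connections, with a minimum of one.
 */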
3305 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3306 {
3307 	struct hci_dev *hdev;
3308 	int cnt, q;
3309 
3310 	if (!conn) {
3311 		*quote = 0;
3312 		return;
3313 	}
3314 
3315 	hdev = conn->hdev;
3316 
3317 	switch (conn->type) {
3318 	case ACL_LINK:
3319 		cnt = hdev->acl_cnt;
3320 		break;
3321 	case SCO_LINK:
3322 	case ESCO_LINK:
3323 		cnt = hdev->sco_cnt;
3324 		break;
3325 	case LE_LINK:
3326 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3327 		break;
3328 	case ISO_LINK:
3329 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3330 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3331 		break;
3332 	default:
3333 		cnt = 0;
3334 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3335 	}
3336 
3337 	q = cnt / num;
3338 	*quote = q ? q : 1;
3339 }
3340 
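/* Pick the connection of the given type that has queued data and the fewest
 * packets outstanding, and compute its sending quota.
 */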
3341 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3342 				     int *quote)
3343 {
3344 	struct hci_conn_hash *h = &hdev->conn_hash;
3345 	struct hci_conn *conn = NULL, *c;
3346 	unsigned int num = 0, min = ~0;
3347 
3348 	/* We don't have to lock the device here. Connections are always
3349 	 * added and removed with the TX task disabled. */
3350 
3351 	rcu_read_lock();
3352 
3353 	list_for_each_entry_rcu(c, &h->list, list) {
3354 		if (c->type != type || skb_queue_empty(&c->data_q))
3355 			continue;
3356 
3357 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3358 			continue;
3359 
3360 		num++;
3361 
3362 		if (c->sent < min) {
3363 			min  = c->sent;
3364 			conn = c;
3365 		}
3366 
3367 		if (hci_conn_num(hdev, type) == num)
3368 			break;
3369 	}
3370 
3371 	rcu_read_unlock();
3372 
3373 	hci_quote_sent(conn, num, quote);
3374 
3375 	BT_DBG("conn %p quote %d", conn, *quote);
3376 	return conn;
3377 }
3378 
3379 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3380 {
3381 	struct hci_conn_hash *h = &hdev->conn_hash;
3382 	struct hci_conn *c;
3383 
3384 	bt_dev_err(hdev, "link tx timeout");
3385 
3386 	rcu_read_lock();
3387 
3388 	/* Kill stalled connections */
3389 	list_for_each_entry_rcu(c, &h->list, list) {
3390 		if (c->type == type && c->sent) {
3391 			bt_dev_err(hdev, "killing stalled connection %pMR",
3392 				   &c->dst);
3393 			/* hci_disconnect might sleep, so, we have to release
3394 			 * the RCU read lock before calling it.
3395 			 */
3396 			rcu_read_unlock();
3397 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3398 			rcu_read_lock();
3399 		}
3400 	}
3401 
3402 	rcu_read_unlock();
3403 }
3404 
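/* Pick the channel whose queued data has the highest priority; among equal
 * priorities, prefer the connection with the fewest packets outstanding.
 * Also compute the sending quota for the chosen channel.
 */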
3405 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3406 				      int *quote)
3407 {
3408 	struct hci_conn_hash *h = &hdev->conn_hash;
3409 	struct hci_chan *chan = NULL;
3410 	unsigned int num = 0, min = ~0, cur_prio = 0;
3411 	struct hci_conn *conn;
3412 	int conn_num = 0;
3413 
3414 	BT_DBG("%s", hdev->name);
3415 
3416 	rcu_read_lock();
3417 
3418 	list_for_each_entry_rcu(conn, &h->list, list) {
3419 		struct hci_chan *tmp;
3420 
3421 		if (conn->type != type)
3422 			continue;
3423 
3424 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3425 			continue;
3426 
3427 		conn_num++;
3428 
3429 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3430 			struct sk_buff *skb;
3431 
3432 			if (skb_queue_empty(&tmp->data_q))
3433 				continue;
3434 
3435 			skb = skb_peek(&tmp->data_q);
3436 			if (skb->priority < cur_prio)
3437 				continue;
3438 
3439 			if (skb->priority > cur_prio) {
3440 				num = 0;
3441 				min = ~0;
3442 				cur_prio = skb->priority;
3443 			}
3444 
3445 			num++;
3446 
3447 			if (conn->sent < min) {
3448 				min  = conn->sent;
3449 				chan = tmp;
3450 			}
3451 		}
3452 
3453 		if (hci_conn_num(hdev, type) == conn_num)
3454 			break;
3455 	}
3456 
3457 	rcu_read_unlock();
3458 
3459 	if (!chan)
3460 		return NULL;
3461 
3462 	hci_quote_sent(chan->conn, num, quote);
3463 
3464 	BT_DBG("chan %p quote %d", chan, *quote);
3465 	return chan;
3466 }
3467 
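/* After a scheduling round, bump the head-of-queue priority of channels that
 * did not get to send anything, so they are not starved indefinitely.
 */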
3468 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3469 {
3470 	struct hci_conn_hash *h = &hdev->conn_hash;
3471 	struct hci_conn *conn;
3472 	int num = 0;
3473 
3474 	BT_DBG("%s", hdev->name);
3475 
3476 	rcu_read_lock();
3477 
3478 	list_for_each_entry_rcu(conn, &h->list, list) {
3479 		struct hci_chan *chan;
3480 
3481 		if (conn->type != type)
3482 			continue;
3483 
3484 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3485 			continue;
3486 
3487 		num++;
3488 
3489 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3490 			struct sk_buff *skb;
3491 
3492 			if (chan->sent) {
3493 				chan->sent = 0;
3494 				continue;
3495 			}
3496 
3497 			if (skb_queue_empty(&chan->data_q))
3498 				continue;
3499 
3500 			skb = skb_peek(&chan->data_q);
3501 			if (skb->priority >= HCI_PRIO_MAX - 1)
3502 				continue;
3503 
3504 			skb->priority = HCI_PRIO_MAX - 1;
3505 
3506 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3507 			       skb->priority);
3508 		}
3509 
3510 		if (hci_conn_num(hdev, type) == num)
3511 			break;
3512 	}
3513 
3514 	rcu_read_unlock();
3515 
3516 }
3517 
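/* Kill stalled links: if the controller returned no buffer credits and
 * nothing was transmitted for longer than HCI_ACL_TX_TIMEOUT, tear down
 * connections of this type.
 */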
3518 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3519 {
3520 	unsigned long last_tx;
3521 
3522 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3523 		return;
3524 
3525 	switch (type) {
3526 	case LE_LINK:
3527 		last_tx = hdev->le_last_tx;
3528 		break;
3529 	default:
3530 		last_tx = hdev->acl_last_tx;
3531 		break;
3532 	}
3533 
3534 	/* tx timeout must be longer than maximum link supervision timeout
3535 	 * (40.9 seconds)
3536 	 */
3537 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3538 		hci_link_tx_to(hdev, type);
3539 }
3540 
3541 /* Schedule SCO */
3542 static void hci_sched_sco(struct hci_dev *hdev)
3543 {
3544 	struct hci_conn *conn;
3545 	struct sk_buff *skb;
3546 	int quote;
3547 
3548 	BT_DBG("%s", hdev->name);
3549 
3550 	if (!hci_conn_num(hdev, SCO_LINK))
3551 		return;
3552 
3553 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3554 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3555 			BT_DBG("skb %p len %d", skb, skb->len);
3556 			hci_send_frame(hdev, skb);
3557 
3558 			conn->sent++;
3559 			if (conn->sent == ~0)
3560 				conn->sent = 0;
3561 		}
3562 	}
3563 }
3564 
3565 static void hci_sched_esco(struct hci_dev *hdev)
3566 {
3567 	struct hci_conn *conn;
3568 	struct sk_buff *skb;
3569 	int quote;
3570 
3571 	BT_DBG("%s", hdev->name);
3572 
3573 	if (!hci_conn_num(hdev, ESCO_LINK))
3574 		return;
3575 
3576 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3577 						     &quote))) {
3578 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3579 			BT_DBG("skb %p len %d", skb, skb->len);
3580 			hci_send_frame(hdev, skb);
3581 
3582 			conn->sent++;
3583 			if (conn->sent == ~0)
3584 				conn->sent = 0;
3585 		}
3586 	}
3587 }
3588 
3589 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3590 {
3591 	unsigned int cnt = hdev->acl_cnt;
3592 	struct hci_chan *chan;
3593 	struct sk_buff *skb;
3594 	int quote;
3595 
3596 	__check_timeout(hdev, cnt, ACL_LINK);
3597 
3598 	while (hdev->acl_cnt &&
3599 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3600 		u32 priority = (skb_peek(&chan->data_q))->priority;
3601 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3602 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3603 			       skb->len, skb->priority);
3604 
3605 			/* Stop if priority has changed */
3606 			if (skb->priority < priority)
3607 				break;
3608 
3609 			skb = skb_dequeue(&chan->data_q);
3610 
3611 			hci_conn_enter_active_mode(chan->conn,
3612 						   bt_cb(skb)->force_active);
3613 
3614 			hci_send_frame(hdev, skb);
3615 			hdev->acl_last_tx = jiffies;
3616 
3617 			hdev->acl_cnt--;
3618 			chan->sent++;
3619 			chan->conn->sent++;
3620 
3621 			/* Send pending SCO packets right away */
3622 			hci_sched_sco(hdev);
3623 			hci_sched_esco(hdev);
3624 		}
3625 	}
3626 
3627 	if (cnt != hdev->acl_cnt)
3628 		hci_prio_recalculate(hdev, ACL_LINK);
3629 }
3630 
3631 static void hci_sched_acl(struct hci_dev *hdev)
3632 {
3633 	BT_DBG("%s", hdev->name);
3634 
3635 	/* No ACL link over BR/EDR controller */
3636 	if (!hci_conn_num(hdev, ACL_LINK))
3637 		return;
3638 
3639 	hci_sched_acl_pkt(hdev);
3640 }
3641 
3642 static void hci_sched_le(struct hci_dev *hdev)
3643 {
3644 	struct hci_chan *chan;
3645 	struct sk_buff *skb;
3646 	int quote, *cnt, tmp;
3647 
3648 	BT_DBG("%s", hdev->name);
3649 
3650 	if (!hci_conn_num(hdev, LE_LINK))
3651 		return;
3652 
3653 	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3654 
3655 	__check_timeout(hdev, *cnt, LE_LINK);
3656 
3657 	tmp = *cnt;
3658 	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3659 		u32 priority = (skb_peek(&chan->data_q))->priority;
3660 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3661 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3662 			       skb->len, skb->priority);
3663 
3664 			/* Stop if priority has changed */
3665 			if (skb->priority < priority)
3666 				break;
3667 
3668 			skb = skb_dequeue(&chan->data_q);
3669 
3670 			hci_send_frame(hdev, skb);
3671 			hdev->le_last_tx = jiffies;
3672 
3673 			(*cnt)--;
3674 			chan->sent++;
3675 			chan->conn->sent++;
3676 
3677 			/* Send pending SCO packets right away */
3678 			hci_sched_sco(hdev);
3679 			hci_sched_esco(hdev);
3680 		}
3681 	}
3682 
3683 	if (*cnt != tmp)
3684 		hci_prio_recalculate(hdev, LE_LINK);
3685 }
3686 
3687 /* Schedule CIS */
3688 static void hci_sched_iso(struct hci_dev *hdev)
3689 {
3690 	struct hci_conn *conn;
3691 	struct sk_buff *skb;
3692 	int quote, *cnt;
3693 
3694 	BT_DBG("%s", hdev->name);
3695 
3696 	if (!hci_conn_num(hdev, ISO_LINK))
3697 		return;
3698 
3699 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3700 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3701 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3702 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3703 			BT_DBG("skb %p len %d", skb, skb->len);
3704 			hci_send_frame(hdev, skb);
3705 
3706 			conn->sent++;
3707 			if (conn->sent == ~0)
3708 				conn->sent = 0;
3709 			(*cnt)--;
3710 		}
3711 	}
3712 }
3713 
3714 static void hci_tx_work(struct work_struct *work)
3715 {
3716 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3717 	struct sk_buff *skb;
3718 
3719 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3720 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3721 
3722 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3723 		/* Schedule queues and send stuff to HCI driver */
3724 		hci_sched_sco(hdev);
3725 		hci_sched_esco(hdev);
3726 		hci_sched_iso(hdev);
3727 		hci_sched_acl(hdev);
3728 		hci_sched_le(hdev);
3729 	}
3730 
3731 	/* Send next queued raw (unknown type) packet */
3732 	while ((skb = skb_dequeue(&hdev->raw_q)))
3733 		hci_send_frame(hdev, skb);
3734 }
3735 
3736 /* ----- HCI RX task (incoming data processing) ----- */
3737 
3738 /* ACL data packet */
3739 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3740 {
3741 	struct hci_acl_hdr *hdr = (void *) skb->data;
3742 	struct hci_conn *conn;
3743 	__u16 handle, flags;
3744 
3745 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3746 
3747 	handle = __le16_to_cpu(hdr->handle);
3748 	flags  = hci_flags(handle);
3749 	handle = hci_handle(handle);
3750 
3751 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3752 	       handle, flags);
3753 
3754 	hdev->stat.acl_rx++;
3755 
3756 	hci_dev_lock(hdev);
3757 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3758 	hci_dev_unlock(hdev);
3759 
3760 	if (conn) {
3761 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3762 
3763 		/* Send to upper protocol */
3764 		l2cap_recv_acldata(conn, skb, flags);
3765 		return;
3766 	} else {
3767 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3768 			   handle);
3769 	}
3770 
3771 	kfree_skb(skb);
3772 }
3773 
3774 /* SCO data packet */
3775 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3776 {
3777 	struct hci_sco_hdr *hdr = (void *) skb->data;
3778 	struct hci_conn *conn;
3779 	__u16 handle, flags;
3780 
3781 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3782 
3783 	handle = __le16_to_cpu(hdr->handle);
3784 	flags  = hci_flags(handle);
3785 	handle = hci_handle(handle);
3786 
3787 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3788 	       handle, flags);
3789 
3790 	hdev->stat.sco_rx++;
3791 
3792 	hci_dev_lock(hdev);
3793 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3794 	hci_dev_unlock(hdev);
3795 
3796 	if (conn) {
3797 		/* Send to upper protocol */
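		/* Bits 12-13 of the handle field carry the packet status flag
		 * used for erroneous data reporting; keep it in the skb
		 * control block for the SCO layer.
		 */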
3798 		hci_skb_pkt_status(skb) = flags & 0x03;
3799 		sco_recv_scodata(conn, skb);
3800 		return;
3801 	} else {
3802 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3803 				       handle);
3804 	}
3805 
3806 	kfree_skb(skb);
3807 }
3808 
3809 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3810 {
3811 	struct hci_iso_hdr *hdr;
3812 	struct hci_conn *conn;
3813 	__u16 handle, flags;
3814 
3815 	hdr = skb_pull_data(skb, sizeof(*hdr));
3816 	if (!hdr) {
3817 		bt_dev_err(hdev, "ISO packet too small");
3818 		goto drop;
3819 	}
3820 
3821 	handle = __le16_to_cpu(hdr->handle);
3822 	flags  = hci_flags(handle);
3823 	handle = hci_handle(handle);
3824 
3825 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3826 		   handle, flags);
3827 
3828 	hci_dev_lock(hdev);
3829 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3830 	hci_dev_unlock(hdev);
3831 
3832 	if (!conn) {
3833 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3834 			   handle);
3835 		goto drop;
3836 	}
3837 
3838 	/* Send to upper protocol */
3839 	iso_recv(conn, skb, flags);
3840 	return;
3841 
3842 drop:
3843 	kfree_skb(skb);
3844 }
3845 
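/* A request is considered complete when the command queue is empty or the
 * next queued command marks the start of a new request (HCI_REQ_START).
 */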
3846 static bool hci_req_is_complete(struct hci_dev *hdev)
3847 {
3848 	struct sk_buff *skb;
3849 
3850 	skb = skb_peek(&hdev->cmd_q);
3851 	if (!skb)
3852 		return true;
3853 
3854 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3855 }
3856 
3857 static void hci_resend_last(struct hci_dev *hdev)
3858 {
3859 	struct hci_command_hdr *sent;
3860 	struct sk_buff *skb;
3861 	u16 opcode;
3862 
3863 	if (!hdev->sent_cmd)
3864 		return;
3865 
3866 	sent = (void *) hdev->sent_cmd->data;
3867 	opcode = __le16_to_cpu(sent->opcode);
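	/* Don't resend an HCI_Reset; anything else gets cloned and re-queued
	 * at the head of the command queue below.
	 */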
3868 	if (opcode == HCI_OP_RESET)
3869 		return;
3870 
3871 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3872 	if (!skb)
3873 		return;
3874 
3875 	skb_queue_head(&hdev->cmd_q, skb);
3876 	queue_work(hdev->workqueue, &hdev->cmd_work);
3877 }
3878 
3879 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3880 			  hci_req_complete_t *req_complete,
3881 			  hci_req_complete_skb_t *req_complete_skb)
3882 {
3883 	struct sk_buff *skb;
3884 	unsigned long flags;
3885 
3886 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3887 
3888 	/* If the completed command doesn't match the last one that was
3889 	 * sent, we need to handle it specially.
3890 	 */
3891 	if (!hci_sent_cmd_data(hdev, opcode)) {
3892 		/* Some CSR based controllers generate a spontaneous
3893 		 * reset complete event during init and any pending
3894 		 * command will never be completed. In such a case we
3895 		 * need to resend whatever was the last sent
3896 		 * command.
3897 		 */
3898 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3899 			hci_resend_last(hdev);
3900 
3901 		return;
3902 	}
3903 
3904 	/* If we reach this point, this event matches the last command sent */
3905 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3906 
3907 	/* If the command succeeded and there are still more commands in
3908 	 * this request, the request is not yet complete.
3909 	 */
3910 	if (!status && !hci_req_is_complete(hdev))
3911 		return;
3912 
3913 	skb = hdev->req_skb;
3914 
3915 	/* If this was the last command in a request, the complete
3916 	 * callback is found in hdev->req_skb instead of the
3917 	 * command queue (hdev->cmd_q).
3918 	 */
3919 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3920 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3921 		return;
3922 	}
3923 
3924 	if (skb && bt_cb(skb)->hci.req_complete) {
3925 		*req_complete = bt_cb(skb)->hci.req_complete;
3926 		return;
3927 	}
3928 
3929 	/* Remove all pending commands belonging to this request */
3930 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3931 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3932 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3933 			__skb_queue_head(&hdev->cmd_q, skb);
3934 			break;
3935 		}
3936 
3937 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3938 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3939 		else
3940 			*req_complete = bt_cb(skb)->hci.req_complete;
3941 		dev_kfree_skb_irq(skb);
3942 	}
3943 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3944 }
3945 
3946 static void hci_rx_work(struct work_struct *work)
3947 {
3948 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3949 	struct sk_buff *skb;
3950 
3951 	BT_DBG("%s", hdev->name);
3952 
3953 	/* The kcov_remote functions are used to collect packet parsing
3954 	 * coverage information from this background thread and to associate
3955 	 * the coverage with the thread of the syscall that originally
3956 	 * injected the packet. This helps with fuzzing the kernel.
3957 	 */
3958 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3959 		kcov_remote_start_common(skb_get_kcov_handle(skb));
3960 
3961 		/* Send copy to monitor */
3962 		hci_send_to_monitor(hdev, skb);
3963 
3964 		if (atomic_read(&hdev->promisc)) {
3965 			/* Send copy to the sockets */
3966 			hci_send_to_sock(hdev, skb);
3967 		}
3968 
3969 		/* If the device has been opened in HCI_USER_CHANNEL,
3970 		 * userspace has exclusive access to the device.
3971 		 * While the device is in HCI_INIT, we still need to pass
3972 		 * the data packets to the driver in order
3973 		 * to complete its setup().
3974 		 */
3975 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3976 		    !test_bit(HCI_INIT, &hdev->flags)) {
3977 			kfree_skb(skb);
3978 			continue;
3979 		}
3980 
3981 		if (test_bit(HCI_INIT, &hdev->flags)) {
3982 			/* Don't process data packets in this state. */
3983 			switch (hci_skb_pkt_type(skb)) {
3984 			case HCI_ACLDATA_PKT:
3985 			case HCI_SCODATA_PKT:
3986 			case HCI_ISODATA_PKT:
3987 				kfree_skb(skb);
3988 				continue;
3989 			}
3990 		}
3991 
3992 		/* Process frame */
3993 		switch (hci_skb_pkt_type(skb)) {
3994 		case HCI_EVENT_PKT:
3995 			BT_DBG("%s Event packet", hdev->name);
3996 			hci_event_packet(hdev, skb);
3997 			break;
3998 
3999 		case HCI_ACLDATA_PKT:
4000 			BT_DBG("%s ACL data packet", hdev->name);
4001 			hci_acldata_packet(hdev, skb);
4002 			break;
4003 
4004 		case HCI_SCODATA_PKT:
4005 			BT_DBG("%s SCO data packet", hdev->name);
4006 			hci_scodata_packet(hdev, skb);
4007 			break;
4008 
4009 		case HCI_ISODATA_PKT:
4010 			BT_DBG("%s ISO data packet", hdev->name);
4011 			hci_isodata_packet(hdev, skb);
4012 			break;
4013 
4014 		default:
4015 			kfree_skb(skb);
4016 			break;
4017 		}
4018 	}
4019 }
4020 
4021 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4022 {
4023 	int err;
4024 
4025 	bt_dev_dbg(hdev, "skb %p", skb);
4026 
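	/* Keep a clone of the command being sent so a later Command
	 * Complete/Status event can be matched against it; drop the copy
	 * left over from the previous command first.
	 */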
4027 	kfree_skb(hdev->sent_cmd);
4028 
4029 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4030 	if (!hdev->sent_cmd) {
4031 		skb_queue_head(&hdev->cmd_q, skb);
4032 		queue_work(hdev->workqueue, &hdev->cmd_work);
4033 		return;
4034 	}
4035 
4036 	err = hci_send_frame(hdev, skb);
4037 	if (err < 0) {
4038 		hci_cmd_sync_cancel_sync(hdev, -err);
4039 		return;
4040 	}
4041 
4042 	if (hci_req_status_pend(hdev) &&
4043 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4044 		kfree_skb(hdev->req_skb);
4045 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4046 	}
4047 
4048 	atomic_dec(&hdev->cmd_cnt);
4049 }
4050 
4051 static void hci_cmd_work(struct work_struct *work)
4052 {
4053 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4054 	struct sk_buff *skb;
4055 
4056 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4057 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4058 
4059 	/* Send queued commands */
4060 	if (atomic_read(&hdev->cmd_cnt)) {
4061 		skb = skb_dequeue(&hdev->cmd_q);
4062 		if (!skb)
4063 			return;
4064 
4065 		hci_send_cmd_sync(hdev, skb);
4066 
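		/* Re-arm the command timeout watchdog unless a controller
		 * reset or a drain of the command queue is in progress.
		 */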
4067 		rcu_read_lock();
4068 		if (test_bit(HCI_RESET, &hdev->flags) ||
4069 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4070 			cancel_delayed_work(&hdev->cmd_timer);
4071 		else
4072 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4073 					   HCI_CMD_TIMEOUT);
4074 		rcu_read_unlock();
4075 	}
4076 }
4077