xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 81d67439)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI core. */
26 
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30 
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47 
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51 
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 
55 #define AUTO_OFF_TIMEOUT 2000
56 
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60 
61 static DEFINE_RWLOCK(hci_task_lock);
62 
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66 
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70 
71 /* HCI protocols */
72 #define HCI_MAX_PROTO	2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 
78 /* ---- HCI notifications ---- */
79 
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82 	return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84 
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89 
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
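
/*
 * Illustrative sketch (not part of this file): a hypothetical in-module
 * client watching device events through the chain above. The callback
 * name my_hci_event is a placeholder.
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			BT_DBG("%s registered", hdev->name);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_hci_nb = {
 *		.notifier_call = my_hci_event,
 *	};
 *
 *	hci_register_notifier(&my_hci_nb);
 */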
94 
95 /* ---- HCI requests ---- */
96 
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 {
99 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100 
101 	/* If this is the init phase, check whether the completed command
102 	 * matches the last init command, and if not, just return.
103 	 */
104 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105 		return;
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		wake_up_interruptible(&hdev->req_wait_q);
111 	}
112 }
113 
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
117 
118 	if (hdev->req_status == HCI_REQ_PEND) {
119 		hdev->req_result = err;
120 		hdev->req_status = HCI_REQ_CANCELED;
121 		wake_up_interruptible(&hdev->req_wait_q);
122 	}
123 }
124 
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 					unsigned long opt, __u32 timeout)
128 {
129 	DECLARE_WAITQUEUE(wait, current);
130 	int err = 0;
131 
132 	BT_DBG("%s start", hdev->name);
133 
134 	hdev->req_status = HCI_REQ_PEND;
135 
136 	add_wait_queue(&hdev->req_wait_q, &wait);
137 	set_current_state(TASK_INTERRUPTIBLE);
138 
139 	req(hdev, opt);
140 	schedule_timeout(timeout);
141 
142 	remove_wait_queue(&hdev->req_wait_q, &wait);
143 
144 	if (signal_pending(current))
145 		return -EINTR;
146 
147 	switch (hdev->req_status) {
148 	case HCI_REQ_DONE:
149 		err = -bt_to_errno(hdev->req_result);
150 		break;
151 
152 	case HCI_REQ_CANCELED:
153 		err = -hdev->req_result;
154 		break;
155 
156 	default:
157 		err = -ETIMEDOUT;
158 		break;
159 	}
160 
161 	hdev->req_status = hdev->req_result = 0;
162 
163 	BT_DBG("%s end: err %d", hdev->name, err);
164 
165 	return err;
166 }
167 
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 					unsigned long opt, __u32 timeout)
170 {
171 	int ret;
172 
173 	if (!test_bit(HCI_UP, &hdev->flags))
174 		return -ENETDOWN;
175 
176 	/* Serialize all requests */
177 	hci_req_lock(hdev);
178 	ret = __hci_request(hdev, req, opt, timeout);
179 	hci_req_unlock(hdev);
180 
181 	return ret;
182 }
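
/*
 * Illustrative usage sketch: the HCISETSCAN ioctl below enables inquiry
 * and page scan with exactly this pattern (SCAN_INQUIRY and SCAN_PAGE
 * come from hci.h; hci_scan_req is defined further down in this file).
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */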
183 
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 	BT_DBG("%s %ld", hdev->name, opt);
187 
188 	/* Reset device */
189 	set_bit(HCI_RESET, &hdev->flags);
190 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192 
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194 {
195 	struct hci_cp_delete_stored_link_key cp;
196 	struct sk_buff *skb;
197 	__le16 param;
198 	__u8 flt_type;
199 
200 	BT_DBG("%s %ld", hdev->name, opt);
201 
202 	/* Driver initialization */
203 
204 	/* Special commands */
205 	while ((skb = skb_dequeue(&hdev->driver_init))) {
206 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207 		skb->dev = (void *) hdev;
208 
209 		skb_queue_tail(&hdev->cmd_q, skb);
210 		tasklet_schedule(&hdev->cmd_task);
211 	}
212 	skb_queue_purge(&hdev->driver_init);
213 
214 	/* Mandatory initialization */
215 
216 	/* Reset */
217 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 		set_bit(HCI_RESET, &hdev->flags);
219 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
220 	}
221 
222 	/* Read Local Supported Features */
223 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
224 
225 	/* Read Local Version */
226 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
227 
228 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
230 
231 #if 0
232 	/* Host buffer size */
233 	{
234 		struct hci_cp_host_buffer_size cp;
235 		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236 		cp.sco_mtu = HCI_MAX_SCO_SIZE;
237 		cp.acl_max_pkt = cpu_to_le16(0xffff);
238 		cp.sco_max_pkt = cpu_to_le16(0xffff);
239 		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
240 	}
241 #endif
242 
243 	/* Read BD Address */
244 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245 
246 	/* Read Class of Device */
247 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248 
249 	/* Read Local Name */
250 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
251 
252 	/* Read Voice Setting */
253 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
254 
255 	/* Optional initialization */
256 
257 	/* Clear Event Filters */
258 	flt_type = HCI_FLT_CLEAR_ALL;
259 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
260 
261 	/* Connection accept timeout ~20 secs */
262 	param = cpu_to_le16(0x7d00);
263 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264 
265 	bacpy(&cp.bdaddr, BDADDR_ANY);
266 	cp.delete_all = 1;
267 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
268 }
269 
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271 {
272 	BT_DBG("%s", hdev->name);
273 
274 	/* Read LE buffer size */
275 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276 }
277 
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280 	__u8 scan = opt;
281 
282 	BT_DBG("%s %x", hdev->name, scan);
283 
284 	/* Inquiry and Page scans */
285 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287 
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290 	__u8 auth = opt;
291 
292 	BT_DBG("%s %x", hdev->name, auth);
293 
294 	/* Authentication */
295 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297 
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300 	__u8 encrypt = opt;
301 
302 	BT_DBG("%s %x", hdev->name, encrypt);
303 
304 	/* Encryption */
305 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307 
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310 	__le16 policy = cpu_to_le16(opt);
311 
312 	BT_DBG("%s %x", hdev->name, policy);
313 
314 	/* Default link policy */
315 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317 
318 /* Get HCI device by index.
319  * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
321 {
322 	struct hci_dev *hdev = NULL;
323 	struct list_head *p;
324 
325 	BT_DBG("%d", index);
326 
327 	if (index < 0)
328 		return NULL;
329 
330 	read_lock(&hci_dev_list_lock);
331 	list_for_each(p, &hci_dev_list) {
332 		struct hci_dev *d = list_entry(p, struct hci_dev, list);
333 		if (d->id == index) {
334 			hdev = hci_dev_hold(d);
335 			break;
336 		}
337 	}
338 	read_unlock(&hci_dev_list_lock);
339 	return hdev;
340 }
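
/*
 * Illustrative sketch: every successful hci_dev_get() must be balanced
 * by hci_dev_put(), as hci_inquiry() below does via its "done" label.
 *
 *	hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);
 */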
341 
342 /* ---- Inquiry support ---- */
343 static void inquiry_cache_flush(struct hci_dev *hdev)
344 {
345 	struct inquiry_cache *cache = &hdev->inq_cache;
346 	struct inquiry_entry *next  = cache->list, *e;
347 
348 	BT_DBG("cache %p", cache);
349 
350 	cache->list = NULL;
351 	while ((e = next)) {
352 		next = e->next;
353 		kfree(e);
354 	}
355 }
356 
357 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
358 {
359 	struct inquiry_cache *cache = &hdev->inq_cache;
360 	struct inquiry_entry *e;
361 
362 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
363 
364 	for (e = cache->list; e; e = e->next)
365 		if (!bacmp(&e->data.bdaddr, bdaddr))
366 			break;
367 	return e;
368 }
369 
370 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
371 {
372 	struct inquiry_cache *cache = &hdev->inq_cache;
373 	struct inquiry_entry *ie;
374 
375 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
376 
377 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
378 	if (!ie) {
379 		/* Entry not in the cache. Add new one. */
380 		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
381 		if (!ie)
382 			return;
383 
384 		ie->next = cache->list;
385 		cache->list = ie;
386 	}
387 
388 	memcpy(&ie->data, data, sizeof(*data));
389 	ie->timestamp = jiffies;
390 	cache->timestamp = jiffies;
391 }
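
/*
 * Illustrative sketch (simplified from the inquiry-result handler in
 * hci_event.c): how an event handler feeds results into this cache,
 * where "info" is a struct inquiry_info from the event payload.
 *
 *	struct inquiry_data data;
 *
 *	bacpy(&data.bdaddr, &info->bdaddr);
 *	data.pscan_rep_mode    = info->pscan_rep_mode;
 *	data.pscan_period_mode = info->pscan_period_mode;
 *	data.pscan_mode        = info->pscan_mode;
 *	memcpy(data.dev_class, info->dev_class, 3);
 *	data.clock_offset      = info->clock_offset;
 *	data.rssi              = 0x00;
 *	data.ssp_mode          = 0x00;
 *
 *	hci_dev_lock(hdev);
 *	hci_inquiry_cache_update(hdev, &data);
 *	hci_dev_unlock(hdev);
 */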
392 
393 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
394 {
395 	struct inquiry_cache *cache = &hdev->inq_cache;
396 	struct inquiry_info *info = (struct inquiry_info *) buf;
397 	struct inquiry_entry *e;
398 	int copied = 0;
399 
400 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
401 		struct inquiry_data *data = &e->data;
402 		bacpy(&info->bdaddr, &data->bdaddr);
403 		info->pscan_rep_mode	= data->pscan_rep_mode;
404 		info->pscan_period_mode	= data->pscan_period_mode;
405 		info->pscan_mode	= data->pscan_mode;
406 		memcpy(info->dev_class, data->dev_class, 3);
407 		info->clock_offset	= data->clock_offset;
408 		info++;
409 	}
410 
411 	BT_DBG("cache %p, copied %d", cache, copied);
412 	return copied;
413 }
414 
415 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
416 {
417 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
418 	struct hci_cp_inquiry cp;
419 
420 	BT_DBG("%s", hdev->name);
421 
422 	if (test_bit(HCI_INQUIRY, &hdev->flags))
423 		return;
424 
425 	/* Start Inquiry */
426 	memcpy(&cp.lap, &ir->lap, 3);
427 	cp.length  = ir->length;
428 	cp.num_rsp = ir->num_rsp;
429 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
430 }
431 
432 int hci_inquiry(void __user *arg)
433 {
434 	__u8 __user *ptr = arg;
435 	struct hci_inquiry_req ir;
436 	struct hci_dev *hdev;
437 	int err = 0, do_inquiry = 0, max_rsp;
438 	long timeo;
439 	__u8 *buf;
440 
441 	if (copy_from_user(&ir, ptr, sizeof(ir)))
442 		return -EFAULT;
443 
444 	hdev = hci_dev_get(ir.dev_id);
445 	if (!hdev)
446 		return -ENODEV;
447 
448 	hci_dev_lock_bh(hdev);
449 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
450 				inquiry_cache_empty(hdev) ||
451 				ir.flags & IREQ_CACHE_FLUSH) {
452 		inquiry_cache_flush(hdev);
453 		do_inquiry = 1;
454 	}
455 	hci_dev_unlock_bh(hdev);
456 
457 	timeo = ir.length * msecs_to_jiffies(2000);
458 
459 	if (do_inquiry) {
460 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
461 		if (err < 0)
462 			goto done;
463 	}
464 
465 	/* For an unlimited number of responses, use a buffer with 255 entries */
466 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
467 
468 	/* cache_dump can't sleep, so allocate a temporary buffer and then
469 	 * copy it to user space.
470 	 */
471 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
472 	if (!buf) {
473 		err = -ENOMEM;
474 		goto done;
475 	}
476 
477 	hci_dev_lock_bh(hdev);
478 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
479 	hci_dev_unlock_bh(hdev);
480 
481 	BT_DBG("num_rsp %d", ir.num_rsp);
482 
483 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
484 		ptr += sizeof(ir);
485 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
486 					ir.num_rsp))
487 			err = -EFAULT;
488 	} else
489 		err = -EFAULT;
490 
491 	kfree(buf);
492 
493 done:
494 	hci_dev_put(hdev);
495 	return err;
496 }
497 
498 /* ---- HCI ioctl helpers ---- */
499 
500 int hci_dev_open(__u16 dev)
501 {
502 	struct hci_dev *hdev;
503 	int ret = 0;
504 
505 	hdev = hci_dev_get(dev);
506 	if (!hdev)
507 		return -ENODEV;
508 
509 	BT_DBG("%s %p", hdev->name, hdev);
510 
511 	hci_req_lock(hdev);
512 
513 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
514 		ret = -ERFKILL;
515 		goto done;
516 	}
517 
518 	if (test_bit(HCI_UP, &hdev->flags)) {
519 		ret = -EALREADY;
520 		goto done;
521 	}
522 
523 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
524 		set_bit(HCI_RAW, &hdev->flags);
525 
526 	/* Treat all non-BR/EDR controllers as raw devices for now */
527 	if (hdev->dev_type != HCI_BREDR)
528 		set_bit(HCI_RAW, &hdev->flags);
529 
530 	if (hdev->open(hdev)) {
531 		ret = -EIO;
532 		goto done;
533 	}
534 
535 	if (!test_bit(HCI_RAW, &hdev->flags)) {
536 		atomic_set(&hdev->cmd_cnt, 1);
537 		set_bit(HCI_INIT, &hdev->flags);
538 		hdev->init_last_cmd = 0;
539 
540 		ret = __hci_request(hdev, hci_init_req, 0,
541 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
542 
543 		if (lmp_host_le_capable(hdev))
544 			ret = __hci_request(hdev, hci_le_init_req, 0,
545 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
546 
547 		clear_bit(HCI_INIT, &hdev->flags);
548 	}
549 
550 	if (!ret) {
551 		hci_dev_hold(hdev);
552 		set_bit(HCI_UP, &hdev->flags);
553 		hci_notify(hdev, HCI_DEV_UP);
554 		if (!test_bit(HCI_SETUP, &hdev->flags))
555 			mgmt_powered(hdev->id, 1);
556 	} else {
557 		/* Init failed, cleanup */
558 		tasklet_kill(&hdev->rx_task);
559 		tasklet_kill(&hdev->tx_task);
560 		tasklet_kill(&hdev->cmd_task);
561 
562 		skb_queue_purge(&hdev->cmd_q);
563 		skb_queue_purge(&hdev->rx_q);
564 
565 		if (hdev->flush)
566 			hdev->flush(hdev);
567 
568 		if (hdev->sent_cmd) {
569 			kfree_skb(hdev->sent_cmd);
570 			hdev->sent_cmd = NULL;
571 		}
572 
573 		hdev->close(hdev);
574 		hdev->flags = 0;
575 	}
576 
577 done:
578 	hci_req_unlock(hdev);
579 	hci_dev_put(hdev);
580 	return ret;
581 }
582 
583 static int hci_dev_do_close(struct hci_dev *hdev)
584 {
585 	BT_DBG("%s %p", hdev->name, hdev);
586 
587 	hci_req_cancel(hdev, ENODEV);
588 	hci_req_lock(hdev);
589 
590 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
591 		del_timer_sync(&hdev->cmd_timer);
592 		hci_req_unlock(hdev);
593 		return 0;
594 	}
595 
596 	/* Kill RX and TX tasks */
597 	tasklet_kill(&hdev->rx_task);
598 	tasklet_kill(&hdev->tx_task);
599 
600 	hci_dev_lock_bh(hdev);
601 	inquiry_cache_flush(hdev);
602 	hci_conn_hash_flush(hdev);
603 	hci_dev_unlock_bh(hdev);
604 
605 	hci_notify(hdev, HCI_DEV_DOWN);
606 
607 	if (hdev->flush)
608 		hdev->flush(hdev);
609 
610 	/* Reset device */
611 	skb_queue_purge(&hdev->cmd_q);
612 	atomic_set(&hdev->cmd_cnt, 1);
613 	if (!test_bit(HCI_RAW, &hdev->flags)) {
614 		set_bit(HCI_INIT, &hdev->flags);
615 		__hci_request(hdev, hci_reset_req, 0,
616 					msecs_to_jiffies(250));
617 		clear_bit(HCI_INIT, &hdev->flags);
618 	}
619 
620 	/* Kill cmd task */
621 	tasklet_kill(&hdev->cmd_task);
622 
623 	/* Drop queues */
624 	skb_queue_purge(&hdev->rx_q);
625 	skb_queue_purge(&hdev->cmd_q);
626 	skb_queue_purge(&hdev->raw_q);
627 
628 	/* Drop last sent command */
629 	if (hdev->sent_cmd) {
630 		del_timer_sync(&hdev->cmd_timer);
631 		kfree_skb(hdev->sent_cmd);
632 		hdev->sent_cmd = NULL;
633 	}
634 
635 	/* After this point our queues are empty
636 	 * and no tasks are scheduled. */
637 	hdev->close(hdev);
638 
639 	mgmt_powered(hdev->id, 0);
640 
641 	/* Clear flags */
642 	hdev->flags = 0;
643 
644 	hci_req_unlock(hdev);
645 
646 	hci_dev_put(hdev);
647 	return 0;
648 }
649 
650 int hci_dev_close(__u16 dev)
651 {
652 	struct hci_dev *hdev;
653 	int err;
654 
655 	hdev = hci_dev_get(dev);
656 	if (!hdev)
657 		return -ENODEV;
658 	err = hci_dev_do_close(hdev);
659 	hci_dev_put(hdev);
660 	return err;
661 }
662 
663 int hci_dev_reset(__u16 dev)
664 {
665 	struct hci_dev *hdev;
666 	int ret = 0;
667 
668 	hdev = hci_dev_get(dev);
669 	if (!hdev)
670 		return -ENODEV;
671 
672 	hci_req_lock(hdev);
673 	tasklet_disable(&hdev->tx_task);
674 
675 	if (!test_bit(HCI_UP, &hdev->flags))
676 		goto done;
677 
678 	/* Drop queues */
679 	skb_queue_purge(&hdev->rx_q);
680 	skb_queue_purge(&hdev->cmd_q);
681 
682 	hci_dev_lock_bh(hdev);
683 	inquiry_cache_flush(hdev);
684 	hci_conn_hash_flush(hdev);
685 	hci_dev_unlock_bh(hdev);
686 
687 	if (hdev->flush)
688 		hdev->flush(hdev);
689 
690 	atomic_set(&hdev->cmd_cnt, 1);
691 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
692 
693 	if (!test_bit(HCI_RAW, &hdev->flags))
694 		ret = __hci_request(hdev, hci_reset_req, 0,
695 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
696 
697 done:
698 	tasklet_enable(&hdev->tx_task);
699 	hci_req_unlock(hdev);
700 	hci_dev_put(hdev);
701 	return ret;
702 }
703 
704 int hci_dev_reset_stat(__u16 dev)
705 {
706 	struct hci_dev *hdev;
707 	int ret = 0;
708 
709 	hdev = hci_dev_get(dev);
710 	if (!hdev)
711 		return -ENODEV;
712 
713 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
714 
715 	hci_dev_put(hdev);
716 
717 	return ret;
718 }
719 
720 int hci_dev_cmd(unsigned int cmd, void __user *arg)
721 {
722 	struct hci_dev *hdev;
723 	struct hci_dev_req dr;
724 	int err = 0;
725 
726 	if (copy_from_user(&dr, arg, sizeof(dr)))
727 		return -EFAULT;
728 
729 	hdev = hci_dev_get(dr.dev_id);
730 	if (!hdev)
731 		return -ENODEV;
732 
733 	switch (cmd) {
734 	case HCISETAUTH:
735 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
736 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
737 		break;
738 
739 	case HCISETENCRYPT:
740 		if (!lmp_encrypt_capable(hdev)) {
741 			err = -EOPNOTSUPP;
742 			break;
743 		}
744 
745 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
746 			/* Auth must be enabled first */
747 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
749 			if (err)
750 				break;
751 		}
752 
753 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
754 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
755 		break;
756 
757 	case HCISETSCAN:
758 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
759 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
760 		break;
761 
762 	case HCISETLINKPOL:
763 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
764 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
765 		break;
766 
767 	case HCISETLINKMODE:
768 		hdev->link_mode = ((__u16) dr.dev_opt) &
769 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
770 		break;
771 
772 	case HCISETPTYPE:
773 		hdev->pkt_type = (__u16) dr.dev_opt;
774 		break;
775 
776 	case HCISETACLMTU:
777 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
778 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
779 		break;
780 
781 	case HCISETSCOMTU:
782 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
783 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
784 		break;
785 
786 	default:
787 		err = -EINVAL;
788 		break;
789 	}
790 
791 	hci_dev_put(hdev);
792 	return err;
793 }
794 
795 int hci_get_dev_list(void __user *arg)
796 {
797 	struct hci_dev_list_req *dl;
798 	struct hci_dev_req *dr;
799 	struct list_head *p;
800 	int n = 0, size, err;
801 	__u16 dev_num;
802 
803 	if (get_user(dev_num, (__u16 __user *) arg))
804 		return -EFAULT;
805 
806 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
807 		return -EINVAL;
808 
809 	size = sizeof(*dl) + dev_num * sizeof(*dr);
810 
811 	dl = kzalloc(size, GFP_KERNEL);
812 	if (!dl)
813 		return -ENOMEM;
814 
815 	dr = dl->dev_req;
816 
817 	read_lock_bh(&hci_dev_list_lock);
818 	list_for_each(p, &hci_dev_list) {
819 		struct hci_dev *hdev;
820 
821 		hdev = list_entry(p, struct hci_dev, list);
822 
823 		hci_del_off_timer(hdev);
824 
825 		if (!test_bit(HCI_MGMT, &hdev->flags))
826 			set_bit(HCI_PAIRABLE, &hdev->flags);
827 
828 		(dr + n)->dev_id  = hdev->id;
829 		(dr + n)->dev_opt = hdev->flags;
830 
831 		if (++n >= dev_num)
832 			break;
833 	}
834 	read_unlock_bh(&hci_dev_list_lock);
835 
836 	dl->dev_num = n;
837 	size = sizeof(*dl) + n * sizeof(*dr);
838 
839 	err = copy_to_user(arg, dl, size);
840 	kfree(dl);
841 
842 	return err ? -EFAULT : 0;
843 }
844 
845 int hci_get_dev_info(void __user *arg)
846 {
847 	struct hci_dev *hdev;
848 	struct hci_dev_info di;
849 	int err = 0;
850 
851 	if (copy_from_user(&di, arg, sizeof(di)))
852 		return -EFAULT;
853 
854 	hdev = hci_dev_get(di.dev_id);
855 	if (!hdev)
856 		return -ENODEV;
857 
858 	hci_del_off_timer(hdev);
859 
860 	if (!test_bit(HCI_MGMT, &hdev->flags))
861 		set_bit(HCI_PAIRABLE, &hdev->flags);
862 
863 	strcpy(di.name, hdev->name);
864 	di.bdaddr   = hdev->bdaddr;
865 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
866 	di.flags    = hdev->flags;
867 	di.pkt_type = hdev->pkt_type;
868 	di.acl_mtu  = hdev->acl_mtu;
869 	di.acl_pkts = hdev->acl_pkts;
870 	di.sco_mtu  = hdev->sco_mtu;
871 	di.sco_pkts = hdev->sco_pkts;
872 	di.link_policy = hdev->link_policy;
873 	di.link_mode   = hdev->link_mode;
874 
875 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
876 	memcpy(&di.features, &hdev->features, sizeof(di.features));
877 
878 	if (copy_to_user(arg, &di, sizeof(di)))
879 		err = -EFAULT;
880 
881 	hci_dev_put(hdev);
882 
883 	return err;
884 }
885 
886 /* ---- Interface to HCI drivers ---- */
887 
888 static int hci_rfkill_set_block(void *data, bool blocked)
889 {
890 	struct hci_dev *hdev = data;
891 
892 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
893 
894 	if (!blocked)
895 		return 0;
896 
897 	hci_dev_do_close(hdev);
898 
899 	return 0;
900 }
901 
902 static const struct rfkill_ops hci_rfkill_ops = {
903 	.set_block = hci_rfkill_set_block,
904 };
905 
906 /* Alloc HCI device */
907 struct hci_dev *hci_alloc_dev(void)
908 {
909 	struct hci_dev *hdev;
910 
911 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
912 	if (!hdev)
913 		return NULL;
914 
915 	skb_queue_head_init(&hdev->driver_init);
916 
917 	return hdev;
918 }
919 EXPORT_SYMBOL(hci_alloc_dev);
920 
921 /* Free HCI device */
922 void hci_free_dev(struct hci_dev *hdev)
923 {
924 	skb_queue_purge(&hdev->driver_init);
925 
926 	/* will free via device release */
927 	put_device(&hdev->dev);
928 }
929 EXPORT_SYMBOL(hci_free_dev);
930 
931 static void hci_power_on(struct work_struct *work)
932 {
933 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
934 
935 	BT_DBG("%s", hdev->name);
936 
937 	if (hci_dev_open(hdev->id) < 0)
938 		return;
939 
940 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
941 		mod_timer(&hdev->off_timer,
942 				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
943 
944 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
945 		mgmt_index_added(hdev->id);
946 }
947 
948 static void hci_power_off(struct work_struct *work)
949 {
950 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
951 
952 	BT_DBG("%s", hdev->name);
953 
954 	hci_dev_close(hdev->id);
955 }
956 
957 static void hci_auto_off(unsigned long data)
958 {
959 	struct hci_dev *hdev = (struct hci_dev *) data;
960 
961 	BT_DBG("%s", hdev->name);
962 
963 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
964 
965 	queue_work(hdev->workqueue, &hdev->power_off);
966 }
967 
968 void hci_del_off_timer(struct hci_dev *hdev)
969 {
970 	BT_DBG("%s", hdev->name);
971 
972 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
973 	del_timer(&hdev->off_timer);
974 }
975 
976 int hci_uuids_clear(struct hci_dev *hdev)
977 {
978 	struct list_head *p, *n;
979 
980 	list_for_each_safe(p, n, &hdev->uuids) {
981 		struct bt_uuid *uuid;
982 
983 		uuid = list_entry(p, struct bt_uuid, list);
984 
985 		list_del(p);
986 		kfree(uuid);
987 	}
988 
989 	return 0;
990 }
991 
992 int hci_link_keys_clear(struct hci_dev *hdev)
993 {
994 	struct list_head *p, *n;
995 
996 	list_for_each_safe(p, n, &hdev->link_keys) {
997 		struct link_key *key;
998 
999 		key = list_entry(p, struct link_key, list);
1000 
1001 		list_del(p);
1002 		kfree(key);
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1009 {
1010 	struct list_head *p;
1011 
1012 	list_for_each(p, &hdev->link_keys) {
1013 		struct link_key *k;
1014 
1015 		k = list_entry(p, struct link_key, list);
1016 
1017 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1018 			return k;
1019 	}
1020 
1021 	return NULL;
1022 }
1023 
1024 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1025 						u8 key_type, u8 old_key_type)
1026 {
1027 	/* Legacy key */
1028 	if (key_type < 0x03)
1029 		return 1;
1030 
1031 	/* Debug keys are insecure so don't store them persistently */
1032 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1033 		return 0;
1034 
1035 	/* Changed combination key and there's no previous one */
1036 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1037 		return 0;
1038 
1039 	/* Security mode 3 case */
1040 	if (!conn)
1041 		return 1;
1042 
1043 	/* Neither the local nor the remote side had no-bonding as a requirement */
1044 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1045 		return 1;
1046 
1047 	/* Local side had dedicated bonding as requirement */
1048 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1049 		return 1;
1050 
1051 	/* Remote side had dedicated bonding as requirement */
1052 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1053 		return 1;
1054 
1055 	/* If none of the above criteria match, then don't store the key
1056 	 * persistently */
1057 	return 0;
1058 }
1059 
1060 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1061 {
1062 	struct link_key *k;
1063 
1064 	list_for_each_entry(k, &hdev->link_keys, list) {
1065 		struct key_master_id *id;
1066 
1067 		if (k->type != HCI_LK_SMP_LTK)
1068 			continue;
1069 
1070 		if (k->dlen != sizeof(*id))
1071 			continue;
1072 
1073 		id = (void *) &k->data;
1074 		if (id->ediv == ediv &&
1075 				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1076 			return k;
1077 	}
1078 
1079 	return NULL;
1080 }
1081 EXPORT_SYMBOL(hci_find_ltk);
1082 
1083 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1084 					bdaddr_t *bdaddr, u8 type)
1085 {
1086 	struct link_key *k;
1087 
1088 	list_for_each_entry(k, &hdev->link_keys, list)
1089 		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1090 			return k;
1091 
1092 	return NULL;
1093 }
1094 EXPORT_SYMBOL(hci_find_link_key_type);
1095 
1096 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1097 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1098 {
1099 	struct link_key *key, *old_key;
1100 	u8 old_key_type, persistent;
1101 
1102 	old_key = hci_find_link_key(hdev, bdaddr);
1103 	if (old_key) {
1104 		old_key_type = old_key->type;
1105 		key = old_key;
1106 	} else {
1107 		old_key_type = conn ? conn->key_type : 0xff;
1108 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1109 		if (!key)
1110 			return -ENOMEM;
1111 		list_add(&key->list, &hdev->link_keys);
1112 	}
1113 
1114 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1115 
1116 	/* Some buggy controller combinations generate a changed
1117 	 * combination key for legacy pairing even when there's no
1118 	 * previous key */
1119 	if (type == HCI_LK_CHANGED_COMBINATION &&
1120 					(!conn || conn->remote_auth == 0xff) &&
1121 					old_key_type == 0xff) {
1122 		type = HCI_LK_COMBINATION;
1123 		if (conn)
1124 			conn->key_type = type;
1125 	}
1126 
1127 	bacpy(&key->bdaddr, bdaddr);
1128 	memcpy(key->val, val, 16);
1129 	key->pin_len = pin_len;
1130 
1131 	if (type == HCI_LK_CHANGED_COMBINATION)
1132 		key->type = old_key_type;
1133 	else
1134 		key->type = type;
1135 
1136 	if (!new_key)
1137 		return 0;
1138 
1139 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1140 
1141 	mgmt_new_key(hdev->id, key, persistent);
1142 
1143 	if (!persistent) {
1144 		list_del(&key->list);
1145 		kfree(key);
1146 	}
1147 
1148 	return 0;
1149 }
1150 
1151 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1152 			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1153 {
1154 	struct link_key *key, *old_key;
1155 	struct key_master_id *id;
1156 	u8 old_key_type;
1157 
1158 	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1159 
1160 	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1161 	if (old_key) {
1162 		key = old_key;
1163 		old_key_type = old_key->type;
1164 	} else {
1165 		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1166 		if (!key)
1167 			return -ENOMEM;
1168 		list_add(&key->list, &hdev->link_keys);
1169 		old_key_type = 0xff;
1170 	}
1171 
1172 	key->dlen = sizeof(*id);
1173 
1174 	bacpy(&key->bdaddr, bdaddr);
1175 	memcpy(key->val, ltk, sizeof(key->val));
1176 	key->type = HCI_LK_SMP_LTK;
1177 	key->pin_len = key_size;
1178 
1179 	id = (void *) &key->data;
1180 	id->ediv = ediv;
1181 	memcpy(id->rand, rand, sizeof(id->rand));
1182 
1183 	if (new_key)
1184 		mgmt_new_key(hdev->id, key, old_key_type);
1185 
1186 	return 0;
1187 }
1188 
1189 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1190 {
1191 	struct link_key *key;
1192 
1193 	key = hci_find_link_key(hdev, bdaddr);
1194 	if (!key)
1195 		return -ENOENT;
1196 
1197 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1198 
1199 	list_del(&key->list);
1200 	kfree(key);
1201 
1202 	return 0;
1203 }
1204 
1205 /* HCI command timer function */
1206 static void hci_cmd_timer(unsigned long arg)
1207 {
1208 	struct hci_dev *hdev = (void *) arg;
1209 
1210 	BT_ERR("%s command tx timeout", hdev->name);
1211 	atomic_set(&hdev->cmd_cnt, 1);
1212 	clear_bit(HCI_RESET, &hdev->flags);
1213 	tasklet_schedule(&hdev->cmd_task);
1214 }
1215 
1216 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1217 							bdaddr_t *bdaddr)
1218 {
1219 	struct oob_data *data;
1220 
1221 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1222 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1223 			return data;
1224 
1225 	return NULL;
1226 }
1227 
1228 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1229 {
1230 	struct oob_data *data;
1231 
1232 	data = hci_find_remote_oob_data(hdev, bdaddr);
1233 	if (!data)
1234 		return -ENOENT;
1235 
1236 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1237 
1238 	list_del(&data->list);
1239 	kfree(data);
1240 
1241 	return 0;
1242 }
1243 
1244 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1245 {
1246 	struct oob_data *data, *n;
1247 
1248 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1249 		list_del(&data->list);
1250 		kfree(data);
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1257 								u8 *randomizer)
1258 {
1259 	struct oob_data *data;
1260 
1261 	data = hci_find_remote_oob_data(hdev, bdaddr);
1262 
1263 	if (!data) {
1264 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1265 		if (!data)
1266 			return -ENOMEM;
1267 
1268 		bacpy(&data->bdaddr, bdaddr);
1269 		list_add(&data->list, &hdev->remote_oob_data);
1270 	}
1271 
1272 	memcpy(data->hash, hash, sizeof(data->hash));
1273 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1274 
1275 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1276 
1277 	return 0;
1278 }
1279 
1280 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1281 						bdaddr_t *bdaddr)
1282 {
1283 	struct list_head *p;
1284 
1285 	list_for_each(p, &hdev->blacklist) {
1286 		struct bdaddr_list *b;
1287 
1288 		b = list_entry(p, struct bdaddr_list, list);
1289 
1290 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1291 			return b;
1292 	}
1293 
1294 	return NULL;
1295 }
1296 
1297 int hci_blacklist_clear(struct hci_dev *hdev)
1298 {
1299 	struct list_head *p, *n;
1300 
1301 	list_for_each_safe(p, n, &hdev->blacklist) {
1302 		struct bdaddr_list *b;
1303 
1304 		b = list_entry(p, struct bdaddr_list, list);
1305 
1306 		list_del(p);
1307 		kfree(b);
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1314 {
1315 	struct bdaddr_list *entry;
1316 	int err;
1317 
1318 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1319 		return -EBADF;
1320 
1321 	hci_dev_lock_bh(hdev);
1322 
1323 	if (hci_blacklist_lookup(hdev, bdaddr)) {
1324 		err = -EEXIST;
1325 		goto err;
1326 	}
1327 
1328 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1329 	if (!entry) {
1330 		err = -ENOMEM;
1331 		goto err;
1332 	}
1333 
1334 	bacpy(&entry->bdaddr, bdaddr);
1335 
1336 	list_add(&entry->list, &hdev->blacklist);
1337 
1338 	err = 0;
1339 
1340 err:
1341 	hci_dev_unlock_bh(hdev);
1342 	return err;
1343 }
1344 
1345 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1346 {
1347 	struct bdaddr_list *entry;
1348 	int err = 0;
1349 
1350 	hci_dev_lock_bh(hdev);
1351 
1352 	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1353 		hci_blacklist_clear(hdev);
1354 		goto done;
1355 	}
1356 
1357 	entry = hci_blacklist_lookup(hdev, bdaddr);
1358 	if (!entry) {
1359 		err = -ENOENT;
1360 		goto done;
1361 	}
1362 
1363 	list_del(&entry->list);
1364 	kfree(entry);
1365 
1366 done:
1367 	hci_dev_unlock_bh(hdev);
1368 	return err;
1369 }
1370 
1371 static void hci_clear_adv_cache(unsigned long arg)
1372 {
1373 	struct hci_dev *hdev = (void *) arg;
1374 
1375 	hci_dev_lock(hdev);
1376 
1377 	hci_adv_entries_clear(hdev);
1378 
1379 	hci_dev_unlock(hdev);
1380 }
1381 
1382 int hci_adv_entries_clear(struct hci_dev *hdev)
1383 {
1384 	struct adv_entry *entry, *tmp;
1385 
1386 	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1387 		list_del(&entry->list);
1388 		kfree(entry);
1389 	}
1390 
1391 	BT_DBG("%s adv cache cleared", hdev->name);
1392 
1393 	return 0;
1394 }
1395 
1396 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397 {
1398 	struct adv_entry *entry;
1399 
1400 	list_for_each_entry(entry, &hdev->adv_entries, list)
1401 		if (bacmp(bdaddr, &entry->bdaddr) == 0)
1402 			return entry;
1403 
1404 	return NULL;
1405 }
1406 
1407 static inline int is_connectable_adv(u8 evt_type)
1408 {
1409 	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1410 		return 1;
1411 
1412 	return 0;
1413 }
1414 
1415 int hci_add_adv_entry(struct hci_dev *hdev,
1416 					struct hci_ev_le_advertising_info *ev)
1417 {
1418 	struct adv_entry *entry;
1419 
1420 	if (!is_connectable_adv(ev->evt_type))
1421 		return -EINVAL;
1422 
1423 	/* Only new entries should be added to adv_entries, so if the
1424 	 * bdaddr was already cached, don't add it again. */
1425 	if (hci_find_adv_entry(hdev, &ev->bdaddr))
1426 		return 0;
1427 
1428 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1429 	if (!entry)
1430 		return -ENOMEM;
1431 
1432 	bacpy(&entry->bdaddr, &ev->bdaddr);
1433 	entry->bdaddr_type = ev->bdaddr_type;
1434 
1435 	list_add(&entry->list, &hdev->adv_entries);
1436 
1437 	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1438 				batostr(&entry->bdaddr), entry->bdaddr_type);
1439 
1440 	return 0;
1441 }
1442 
1443 /* Register HCI device */
1444 int hci_register_dev(struct hci_dev *hdev)
1445 {
1446 	struct list_head *head = &hci_dev_list, *p;
1447 	int i, id = 0;
1448 
1449 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1450 						hdev->bus, hdev->owner);
1451 
1452 	if (!hdev->open || !hdev->close || !hdev->destruct)
1453 		return -EINVAL;
1454 
1455 	write_lock_bh(&hci_dev_list_lock);
1456 
1457 	/* Find first available device id */
1458 	list_for_each(p, &hci_dev_list) {
1459 		if (list_entry(p, struct hci_dev, list)->id != id)
1460 			break;
1461 		head = p; id++;
1462 	}
1463 
1464 	sprintf(hdev->name, "hci%d", id);
1465 	hdev->id = id;
1466 	list_add(&hdev->list, head);
1467 
1468 	atomic_set(&hdev->refcnt, 1);
1469 	spin_lock_init(&hdev->lock);
1470 
1471 	hdev->flags = 0;
1472 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1473 	hdev->esco_type = (ESCO_HV1);
1474 	hdev->link_mode = (HCI_LM_ACCEPT);
1475 	hdev->io_capability = 0x03; /* No Input No Output */
1476 
1477 	hdev->idle_timeout = 0;
1478 	hdev->sniff_max_interval = 800;
1479 	hdev->sniff_min_interval = 80;
1480 
1481 	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1482 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1483 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1484 
1485 	skb_queue_head_init(&hdev->rx_q);
1486 	skb_queue_head_init(&hdev->cmd_q);
1487 	skb_queue_head_init(&hdev->raw_q);
1488 
1489 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1490 
1491 	for (i = 0; i < NUM_REASSEMBLY; i++)
1492 		hdev->reassembly[i] = NULL;
1493 
1494 	init_waitqueue_head(&hdev->req_wait_q);
1495 	mutex_init(&hdev->req_lock);
1496 
1497 	inquiry_cache_init(hdev);
1498 
1499 	hci_conn_hash_init(hdev);
1500 
1501 	INIT_LIST_HEAD(&hdev->blacklist);
1502 
1503 	INIT_LIST_HEAD(&hdev->uuids);
1504 
1505 	INIT_LIST_HEAD(&hdev->link_keys);
1506 
1507 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1508 
1509 	INIT_LIST_HEAD(&hdev->adv_entries);
1510 	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1511 						(unsigned long) hdev);
1512 
1513 	INIT_WORK(&hdev->power_on, hci_power_on);
1514 	INIT_WORK(&hdev->power_off, hci_power_off);
1515 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1516 
1517 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1518 
1519 	atomic_set(&hdev->promisc, 0);
1520 
1521 	write_unlock_bh(&hci_dev_list_lock);
1522 
1523 	hdev->workqueue = create_singlethread_workqueue(hdev->name);
1524 	if (!hdev->workqueue)
1525 		goto nomem;
1526 
1527 	hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1528 	if (IS_ERR(hdev->tfm))
1529 		BT_INFO("Failed to load transform for ecb(aes): %ld",
1530 							PTR_ERR(hdev->tfm));
1531 
1532 	hci_register_sysfs(hdev);
1533 
1534 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1535 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1536 	if (hdev->rfkill) {
1537 		if (rfkill_register(hdev->rfkill) < 0) {
1538 			rfkill_destroy(hdev->rfkill);
1539 			hdev->rfkill = NULL;
1540 		}
1541 	}
1542 
1543 	set_bit(HCI_AUTO_OFF, &hdev->flags);
1544 	set_bit(HCI_SETUP, &hdev->flags);
1545 	queue_work(hdev->workqueue, &hdev->power_on);
1546 
1547 	hci_notify(hdev, HCI_DEV_REG);
1548 
1549 	return id;
1550 
1551 nomem:
1552 	write_lock_bh(&hci_dev_list_lock);
1553 	list_del(&hdev->list);
1554 	write_unlock_bh(&hci_dev_list_lock);
1555 
1556 	return -ENOMEM;
1557 }
1558 EXPORT_SYMBOL(hci_register_dev);
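
/*
 * Illustrative sketch (hypothetical "foo" transport driver; the foo_*
 * callbacks are placeholders): the minimal setup a driver performs
 * before registering.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = foo_open;
 *	hdev->close    = foo_close;
 *	hdev->flush    = foo_flush;
 *	hdev->send     = foo_send;
 *	hdev->destruct = foo_destruct;
 *	hdev->owner    = THIS_MODULE;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */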
1559 
1560 /* Unregister HCI device */
1561 int hci_unregister_dev(struct hci_dev *hdev)
1562 {
1563 	int i;
1564 
1565 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1566 
1567 	write_lock_bh(&hci_dev_list_lock);
1568 	list_del(&hdev->list);
1569 	write_unlock_bh(&hci_dev_list_lock);
1570 
1571 	hci_dev_do_close(hdev);
1572 
1573 	for (i = 0; i < NUM_REASSEMBLY; i++)
1574 		kfree_skb(hdev->reassembly[i]);
1575 
1576 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1577 					!test_bit(HCI_SETUP, &hdev->flags))
1578 		mgmt_index_removed(hdev->id);
1579 
1580 	if (!IS_ERR(hdev->tfm))
1581 		crypto_free_blkcipher(hdev->tfm);
1582 
1583 	hci_notify(hdev, HCI_DEV_UNREG);
1584 
1585 	if (hdev->rfkill) {
1586 		rfkill_unregister(hdev->rfkill);
1587 		rfkill_destroy(hdev->rfkill);
1588 	}
1589 
1590 	hci_unregister_sysfs(hdev);
1591 
1592 	hci_del_off_timer(hdev);
1593 	del_timer(&hdev->adv_timer);
1594 
1595 	destroy_workqueue(hdev->workqueue);
1596 
1597 	hci_dev_lock_bh(hdev);
1598 	hci_blacklist_clear(hdev);
1599 	hci_uuids_clear(hdev);
1600 	hci_link_keys_clear(hdev);
1601 	hci_remote_oob_data_clear(hdev);
1602 	hci_adv_entries_clear(hdev);
1603 	hci_dev_unlock_bh(hdev);
1604 
1605 	__hci_dev_put(hdev);
1606 
1607 	return 0;
1608 }
1609 EXPORT_SYMBOL(hci_unregister_dev);
1610 
1611 /* Suspend HCI device */
1612 int hci_suspend_dev(struct hci_dev *hdev)
1613 {
1614 	hci_notify(hdev, HCI_DEV_SUSPEND);
1615 	return 0;
1616 }
1617 EXPORT_SYMBOL(hci_suspend_dev);
1618 
1619 /* Resume HCI device */
1620 int hci_resume_dev(struct hci_dev *hdev)
1621 {
1622 	hci_notify(hdev, HCI_DEV_RESUME);
1623 	return 0;
1624 }
1625 EXPORT_SYMBOL(hci_resume_dev);
1626 
1627 /* Receive frame from HCI drivers */
1628 int hci_recv_frame(struct sk_buff *skb)
1629 {
1630 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1631 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1632 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1633 		kfree_skb(skb);
1634 		return -ENXIO;
1635 	}
1636 
1637 	/* Incoming skb */
1638 	bt_cb(skb)->incoming = 1;
1639 
1640 	/* Time stamp */
1641 	__net_timestamp(skb);
1642 
1643 	/* Queue frame for rx task */
1644 	skb_queue_tail(&hdev->rx_q, skb);
1645 	tasklet_schedule(&hdev->rx_task);
1646 
1647 	return 0;
1648 }
1649 EXPORT_SYMBOL(hci_recv_frame);
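
/*
 * Illustrative sketch: how a driver hands a complete packet to the
 * core. Both pkt_type and skb->dev must be set before the call, since
 * hci_recv_frame() recovers hdev from skb->dev.
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */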
1650 
1651 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1652 						  int count, __u8 index)
1653 {
1654 	int len = 0;
1655 	int hlen = 0;
1656 	int remain = count;
1657 	struct sk_buff *skb;
1658 	struct bt_skb_cb *scb;
1659 
1660 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1661 				index >= NUM_REASSEMBLY)
1662 		return -EILSEQ;
1663 
1664 	skb = hdev->reassembly[index];
1665 
1666 	if (!skb) {
1667 		switch (type) {
1668 		case HCI_ACLDATA_PKT:
1669 			len = HCI_MAX_FRAME_SIZE;
1670 			hlen = HCI_ACL_HDR_SIZE;
1671 			break;
1672 		case HCI_EVENT_PKT:
1673 			len = HCI_MAX_EVENT_SIZE;
1674 			hlen = HCI_EVENT_HDR_SIZE;
1675 			break;
1676 		case HCI_SCODATA_PKT:
1677 			len = HCI_MAX_SCO_SIZE;
1678 			hlen = HCI_SCO_HDR_SIZE;
1679 			break;
1680 		}
1681 
1682 		skb = bt_skb_alloc(len, GFP_ATOMIC);
1683 		if (!skb)
1684 			return -ENOMEM;
1685 
1686 		scb = (void *) skb->cb;
1687 		scb->expect = hlen;
1688 		scb->pkt_type = type;
1689 
1690 		skb->dev = (void *) hdev;
1691 		hdev->reassembly[index] = skb;
1692 	}
1693 
1694 	while (count) {
1695 		scb = (void *) skb->cb;
1696 		len = min(scb->expect, (__u16)count);
1697 
1698 		memcpy(skb_put(skb, len), data, len);
1699 
1700 		count -= len;
1701 		data += len;
1702 		scb->expect -= len;
1703 		remain = count;
1704 
1705 		switch (type) {
1706 		case HCI_EVENT_PKT:
1707 			if (skb->len == HCI_EVENT_HDR_SIZE) {
1708 				struct hci_event_hdr *h = hci_event_hdr(skb);
1709 				scb->expect = h->plen;
1710 
1711 				if (skb_tailroom(skb) < scb->expect) {
1712 					kfree_skb(skb);
1713 					hdev->reassembly[index] = NULL;
1714 					return -ENOMEM;
1715 				}
1716 			}
1717 			break;
1718 
1719 		case HCI_ACLDATA_PKT:
1720 			if (skb->len  == HCI_ACL_HDR_SIZE) {
1721 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
1722 				scb->expect = __le16_to_cpu(h->dlen);
1723 
1724 				if (skb_tailroom(skb) < scb->expect) {
1725 					kfree_skb(skb);
1726 					hdev->reassembly[index] = NULL;
1727 					return -ENOMEM;
1728 				}
1729 			}
1730 			break;
1731 
1732 		case HCI_SCODATA_PKT:
1733 			if (skb->len == HCI_SCO_HDR_SIZE) {
1734 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
1735 				scb->expect = h->dlen;
1736 
1737 				if (skb_tailroom(skb) < scb->expect) {
1738 					kfree_skb(skb);
1739 					hdev->reassembly[index] = NULL;
1740 					return -ENOMEM;
1741 				}
1742 			}
1743 			break;
1744 		}
1745 
1746 		if (scb->expect == 0) {
1747 			/* Complete frame */
1748 
1749 			bt_cb(skb)->pkt_type = type;
1750 			hci_recv_frame(skb);
1751 
1752 			hdev->reassembly[index] = NULL;
1753 			return remain;
1754 		}
1755 	}
1756 
1757 	return remain;
1758 }
1759 
1760 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1761 {
1762 	int rem = 0;
1763 
1764 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1765 		return -EILSEQ;
1766 
1767 	while (count) {
1768 		rem = hci_reassembly(hdev, type, data, count, type - 1);
1769 		if (rem < 0)
1770 			return rem;
1771 
1772 		data += (count - rem);
1773 		count = rem;
1774 	}
1775 
1776 	return rem;
1777 }
1778 EXPORT_SYMBOL(hci_recv_fragment);
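
/*
 * Illustrative sketch (hypothetical UART-style driver; read_bytes() is
 * a placeholder for the driver's own I/O): raw bytes of a known packet
 * type are fed in chunks and reassembled across calls.
 *
 *	count = read_bytes(buf, sizeof(buf));
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, count);
 *	if (err < 0)
 *		BT_ERR("frame reassembly failed (%d)", err);
 */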
1779 
1780 #define STREAM_REASSEMBLY 0
1781 
1782 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1783 {
1784 	int type;
1785 	int rem = 0;
1786 
1787 	while (count) {
1788 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1789 
1790 		if (!skb) {
1791 			struct { char type; } *pkt;
1792 
1793 			/* Start of the frame */
1794 			pkt = data;
1795 			type = pkt->type;
1796 
1797 			data++;
1798 			count--;
1799 		} else
1800 			type = bt_cb(skb)->pkt_type;
1801 
1802 		rem = hci_reassembly(hdev, type, data, count,
1803 							STREAM_REASSEMBLY);
1804 		if (rem < 0)
1805 			return rem;
1806 
1807 		data += (count - rem);
1808 		count = rem;
1809 	}
1810 
1811 	return rem;
1812 }
1813 EXPORT_SYMBOL(hci_recv_stream_fragment);
1814 
1815 /* ---- Interface to upper protocols ---- */
1816 
1817 /* Register/Unregister protocols.
1818  * hci_task_lock is used to ensure that no tasks are running. */
1819 int hci_register_proto(struct hci_proto *hp)
1820 {
1821 	int err = 0;
1822 
1823 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1824 
1825 	if (hp->id >= HCI_MAX_PROTO)
1826 		return -EINVAL;
1827 
1828 	write_lock_bh(&hci_task_lock);
1829 
1830 	if (!hci_proto[hp->id])
1831 		hci_proto[hp->id] = hp;
1832 	else
1833 		err = -EEXIST;
1834 
1835 	write_unlock_bh(&hci_task_lock);
1836 
1837 	return err;
1838 }
1839 EXPORT_SYMBOL(hci_register_proto);
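
/*
 * Illustrative sketch, simplified from l2cap_core.c: L2CAP plugs into
 * the hci_proto table this way, and its recv_acldata callback is then
 * invoked from hci_acldata_packet() below.
 *
 *	static struct hci_proto l2cap_hci_proto = {
 *		.name         = "L2CAP",
 *		.id           = HCI_PROTO_L2CAP,
 *		.recv_acldata = l2cap_recv_acldata,
 *	};
 *
 *	hci_register_proto(&l2cap_hci_proto);
 */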
1840 
1841 int hci_unregister_proto(struct hci_proto *hp)
1842 {
1843 	int err = 0;
1844 
1845 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1846 
1847 	if (hp->id >= HCI_MAX_PROTO)
1848 		return -EINVAL;
1849 
1850 	write_lock_bh(&hci_task_lock);
1851 
1852 	if (hci_proto[hp->id])
1853 		hci_proto[hp->id] = NULL;
1854 	else
1855 		err = -ENOENT;
1856 
1857 	write_unlock_bh(&hci_task_lock);
1858 
1859 	return err;
1860 }
1861 EXPORT_SYMBOL(hci_unregister_proto);
1862 
1863 int hci_register_cb(struct hci_cb *cb)
1864 {
1865 	BT_DBG("%p name %s", cb, cb->name);
1866 
1867 	write_lock_bh(&hci_cb_list_lock);
1868 	list_add(&cb->list, &hci_cb_list);
1869 	write_unlock_bh(&hci_cb_list_lock);
1870 
1871 	return 0;
1872 }
1873 EXPORT_SYMBOL(hci_register_cb);
1874 
1875 int hci_unregister_cb(struct hci_cb *cb)
1876 {
1877 	BT_DBG("%p name %s", cb, cb->name);
1878 
1879 	write_lock_bh(&hci_cb_list_lock);
1880 	list_del(&cb->list);
1881 	write_unlock_bh(&hci_cb_list_lock);
1882 
1883 	return 0;
1884 }
1885 EXPORT_SYMBOL(hci_unregister_cb);
1886 
1887 static int hci_send_frame(struct sk_buff *skb)
1888 {
1889 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1890 
1891 	if (!hdev) {
1892 		kfree_skb(skb);
1893 		return -ENODEV;
1894 	}
1895 
1896 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1897 
1898 	if (atomic_read(&hdev->promisc)) {
1899 		/* Time stamp */
1900 		__net_timestamp(skb);
1901 
1902 		hci_send_to_sock(hdev, skb, NULL);
1903 	}
1904 
1905 	/* Get rid of the skb owner prior to sending it to the driver. */
1906 	skb_orphan(skb);
1907 
1908 	return hdev->send(skb);
1909 }
1910 
1911 /* Send HCI command */
1912 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1913 {
1914 	int len = HCI_COMMAND_HDR_SIZE + plen;
1915 	struct hci_command_hdr *hdr;
1916 	struct sk_buff *skb;
1917 
1918 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1919 
1920 	skb = bt_skb_alloc(len, GFP_ATOMIC);
1921 	if (!skb) {
1922 		BT_ERR("%s no memory for command", hdev->name);
1923 		return -ENOMEM;
1924 	}
1925 
1926 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1927 	hdr->opcode = cpu_to_le16(opcode);
1928 	hdr->plen   = plen;
1929 
1930 	if (plen)
1931 		memcpy(skb_put(skb, plen), param, plen);
1932 
1933 	BT_DBG("skb len %d", skb->len);
1934 
1935 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1936 	skb->dev = (void *) hdev;
1937 
1938 	if (test_bit(HCI_INIT, &hdev->flags))
1939 		hdev->init_last_cmd = opcode;
1940 
1941 	skb_queue_tail(&hdev->cmd_q, skb);
1942 	tasklet_schedule(&hdev->cmd_task);
1943 
1944 	return 0;
1945 }
1946 
1947 /* Get data from the previously sent command */
1948 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1949 {
1950 	struct hci_command_hdr *hdr;
1951 
1952 	if (!hdev->sent_cmd)
1953 		return NULL;
1954 
1955 	hdr = (void *) hdev->sent_cmd->data;
1956 
1957 	if (hdr->opcode != cpu_to_le16(opcode))
1958 		return NULL;
1959 
1960 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1961 
1962 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1963 }
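
/*
 * Illustrative sketch (mirroring hci_event.c): a Command Complete
 * handler pairs the event with the parameters of the command that
 * triggered it.
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (sent)
 *		BT_DBG("scan 0x%2.2x was requested", *((__u8 *) sent));
 */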
1964 
1965 /* Send ACL data */
1966 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1967 {
1968 	struct hci_acl_hdr *hdr;
1969 	int len = skb->len;
1970 
1971 	skb_push(skb, HCI_ACL_HDR_SIZE);
1972 	skb_reset_transport_header(skb);
1973 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1974 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1975 	hdr->dlen   = cpu_to_le16(len);
1976 }
1977 
1978 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1979 {
1980 	struct hci_dev *hdev = conn->hdev;
1981 	struct sk_buff *list;
1982 
1983 	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1984 
1985 	skb->dev = (void *) hdev;
1986 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1987 	hci_add_acl_hdr(skb, conn->handle, flags);
1988 
1989 	list = skb_shinfo(skb)->frag_list;
1990 	if (!list) {
1991 		/* Non-fragmented */
1992 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1993 
1994 		skb_queue_tail(&conn->data_q, skb);
1995 	} else {
1996 		/* Fragmented */
1997 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1998 
1999 		skb_shinfo(skb)->frag_list = NULL;
2000 
2001 		/* Queue all fragments atomically */
2002 		spin_lock_bh(&conn->data_q.lock);
2003 
2004 		__skb_queue_tail(&conn->data_q, skb);
2005 
2006 		flags &= ~ACL_START;
2007 		flags |= ACL_CONT;
2008 		do {
2009 			skb = list; list = list->next;
2010 
2011 			skb->dev = (void *) hdev;
2012 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2013 			hci_add_acl_hdr(skb, conn->handle, flags);
2014 
2015 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2016 
2017 			__skb_queue_tail(&conn->data_q, skb);
2018 		} while (list);
2019 
2020 		spin_unlock_bh(&conn->data_q.lock);
2021 	}
2022 
2023 	tasklet_schedule(&hdev->tx_task);
2024 }
2025 EXPORT_SYMBOL(hci_send_acl);
2026 
2027 /* Send SCO data */
2028 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2029 {
2030 	struct hci_dev *hdev = conn->hdev;
2031 	struct hci_sco_hdr hdr;
2032 
2033 	BT_DBG("%s len %d", hdev->name, skb->len);
2034 
2035 	hdr.handle = cpu_to_le16(conn->handle);
2036 	hdr.dlen   = skb->len;
2037 
2038 	skb_push(skb, HCI_SCO_HDR_SIZE);
2039 	skb_reset_transport_header(skb);
2040 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2041 
2042 	skb->dev = (void *) hdev;
2043 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2044 
2045 	skb_queue_tail(&conn->data_q, skb);
2046 	tasklet_schedule(&hdev->tx_task);
2047 }
2048 EXPORT_SYMBOL(hci_send_sco);
2049 
2050 /* ---- HCI TX task (outgoing data) ---- */
2051 
2052 /* HCI Connection scheduler */
2053 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2054 {
2055 	struct hci_conn_hash *h = &hdev->conn_hash;
2056 	struct hci_conn *conn = NULL;
2057 	int num = 0, min = ~0;
2058 	struct list_head *p;
2059 
2060 	/* We don't have to lock the device here: connections are always
2061 	 * added and removed with the TX task disabled. */
2062 	list_for_each(p, &h->list) {
2063 		struct hci_conn *c;
2064 		c = list_entry(p, struct hci_conn, list);
2065 
2066 		if (c->type != type || skb_queue_empty(&c->data_q))
2067 			continue;
2068 
2069 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2070 			continue;
2071 
2072 		num++;
2073 
2074 		if (c->sent < min) {
2075 			min  = c->sent;
2076 			conn = c;
2077 		}
2078 	}
2079 
2080 	if (conn) {
2081 		int cnt, q;
2082 
2083 		switch (conn->type) {
2084 		case ACL_LINK:
2085 			cnt = hdev->acl_cnt;
2086 			break;
2087 		case SCO_LINK:
2088 		case ESCO_LINK:
2089 			cnt = hdev->sco_cnt;
2090 			break;
2091 		case LE_LINK:
2092 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2093 			break;
2094 		default:
2095 			cnt = 0;
2096 			BT_ERR("Unknown link type");
2097 		}
2098 
2099 		q = cnt / num;
2100 		*quote = q ? q : 1;
2101 	} else
2102 		*quote = 0;
2103 
2104 	BT_DBG("conn %p quote %d", conn, *quote);
2105 	return conn;
2106 }
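
/*
 * Quota example: with hdev->acl_cnt == 5 free ACL credits and num == 2
 * ACL connections holding queued data, the least-used connection gets
 * q = 5 / 2 = 2 frames this round; as long as credits remain, *quote
 * is never rounded down below 1.
 */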
2107 
2108 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2109 {
2110 	struct hci_conn_hash *h = &hdev->conn_hash;
2111 	struct list_head *p;
2112 	struct hci_conn  *c;
2113 
2114 	BT_ERR("%s link tx timeout", hdev->name);
2115 
2116 	/* Kill stalled connections */
2117 	list_for_each(p, &h->list) {
2118 		c = list_entry(p, struct hci_conn, list);
2119 		if (c->type == type && c->sent) {
2120 			BT_ERR("%s killing stalled connection %s",
2121 				hdev->name, batostr(&c->dst));
2122 			hci_acl_disconn(c, 0x13);
2123 		}
2124 	}
2125 }
2126 
2127 static inline void hci_sched_acl(struct hci_dev *hdev)
2128 {
2129 	struct hci_conn *conn;
2130 	struct sk_buff *skb;
2131 	int quote;
2132 
2133 	BT_DBG("%s", hdev->name);
2134 
2135 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2136 		/* ACL tx timeout must be longer than maximum
2137 		 * link supervision timeout (40.9 seconds) */
2138 		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2139 			hci_link_tx_to(hdev, ACL_LINK);
2140 	}
2141 
2142 	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2143 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2144 			BT_DBG("skb %p len %d", skb, skb->len);
2145 
2146 			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2147 
2148 			hci_send_frame(skb);
2149 			hdev->acl_last_tx = jiffies;
2150 
2151 			hdev->acl_cnt--;
2152 			conn->sent++;
2153 		}
2154 	}
2155 }
2156 
2157 /* Schedule SCO */
2158 static inline void hci_sched_sco(struct hci_dev *hdev)
2159 {
2160 	struct hci_conn *conn;
2161 	struct sk_buff *skb;
2162 	int quote;
2163 
2164 	BT_DBG("%s", hdev->name);
2165 
2166 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2167 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2168 			BT_DBG("skb %p len %d", skb, skb->len);
2169 			hci_send_frame(skb);
2170 
2171 			conn->sent++;
2172 			if (conn->sent == ~0)
2173 				conn->sent = 0;
2174 		}
2175 	}
2176 }
2177 
2178 static inline void hci_sched_esco(struct hci_dev *hdev)
2179 {
2180 	struct hci_conn *conn;
2181 	struct sk_buff *skb;
2182 	int quote;
2183 
2184 	BT_DBG("%s", hdev->name);
2185 
2186 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2187 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2188 			BT_DBG("skb %p len %d", skb, skb->len);
2189 			hci_send_frame(skb);
2190 
2191 			conn->sent++;
2192 			if (conn->sent == ~0)
2193 				conn->sent = 0;
2194 		}
2195 	}
2196 }
2197 
2198 static inline void hci_sched_le(struct hci_dev *hdev)
2199 {
2200 	struct hci_conn *conn;
2201 	struct sk_buff *skb;
2202 	int quote, cnt;
2203 
2204 	BT_DBG("%s", hdev->name);
2205 
2206 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2207 		/* LE tx timeout must be longer than maximum
2208 		 * link supervision timeout (40.9 seconds) */
2209 		if (!hdev->le_cnt && hdev->le_pkts &&
2210 				time_after(jiffies, hdev->le_last_tx + HZ * 45))
2211 			hci_link_tx_to(hdev, LE_LINK);
2212 	}
2213 
2214 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2215 	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2216 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2217 			BT_DBG("skb %p len %d", skb, skb->len);
2218 
2219 			hci_send_frame(skb);
2220 			hdev->le_last_tx = jiffies;
2221 
2222 			cnt--;
2223 			conn->sent++;
2224 		}
2225 	}
2226 	if (hdev->le_pkts)
2227 		hdev->le_cnt = cnt;
2228 	else
2229 		hdev->acl_cnt = cnt;
2230 }
2231 
2232 static void hci_tx_task(unsigned long arg)
2233 {
2234 	struct hci_dev *hdev = (struct hci_dev *) arg;
2235 	struct sk_buff *skb;
2236 
2237 	read_lock(&hci_task_lock);
2238 
2239 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2240 		hdev->sco_cnt, hdev->le_cnt);
2241 
2242 	/* Schedule queues and send stuff to HCI driver */
2243 
2244 	hci_sched_acl(hdev);
2245 
2246 	hci_sched_sco(hdev);
2247 
2248 	hci_sched_esco(hdev);
2249 
2250 	hci_sched_le(hdev);
2251 
2252 	/* Send next queued raw (unknown type) packet */
2253 	while ((skb = skb_dequeue(&hdev->raw_q)))
2254 		hci_send_frame(skb);
2255 
2256 	read_unlock(&hci_task_lock);
2257 }
2258 
2259 /* ----- HCI RX task (incoming data processing) ----- */
2260 
2261 /* ACL data packet */
2262 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2263 {
2264 	struct hci_acl_hdr *hdr = (void *) skb->data;
2265 	struct hci_conn *conn;
2266 	__u16 handle, flags;
2267 
2268 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2269 
2270 	handle = __le16_to_cpu(hdr->handle);
2271 	flags  = hci_flags(handle);
2272 	handle = hci_handle(handle);
2273 
2274 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2275 
2276 	hdev->stat.acl_rx++;
2277 
2278 	hci_dev_lock(hdev);
2279 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2280 	hci_dev_unlock(hdev);
2281 
2282 	if (conn) {
2283 		register struct hci_proto *hp;
2284 
2285 		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2286 
2287 		/* Send to upper protocol */
2288 		hp = hci_proto[HCI_PROTO_L2CAP];
2289 		if (hp && hp->recv_acldata) {
2290 			hp->recv_acldata(conn, skb, flags);
2291 			return;
2292 		}
2293 	} else {
2294 		BT_ERR("%s ACL packet for unknown connection handle %d",
2295 			hdev->name, handle);
2296 	}
2297 
2298 	kfree_skb(skb);
2299 }
2300 
2301 /* SCO data packet */
2302 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2303 {
2304 	struct hci_sco_hdr *hdr = (void *) skb->data;
2305 	struct hci_conn *conn;
2306 	__u16 handle;
2307 
2308 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2309 
2310 	handle = __le16_to_cpu(hdr->handle);
2311 
2312 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2313 
2314 	hdev->stat.sco_rx++;
2315 
2316 	hci_dev_lock(hdev);
2317 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2318 	hci_dev_unlock(hdev);
2319 
2320 	if (conn) {
2321 		register struct hci_proto *hp;
2322 
2323 		/* Send to upper protocol */
2324 		hp = hci_proto[HCI_PROTO_SCO];
2325 		if (hp && hp->recv_scodata) {
2326 			hp->recv_scodata(conn, skb);
2327 			return;
2328 		}
2329 	} else {
2330 		BT_ERR("%s SCO packet for unknown connection handle %d",
2331 			hdev->name, handle);
2332 	}
2333 
2334 	kfree_skb(skb);
2335 }
2336 
2337 static void hci_rx_task(unsigned long arg)
2338 {
2339 	struct hci_dev *hdev = (struct hci_dev *) arg;
2340 	struct sk_buff *skb;
2341 
2342 	BT_DBG("%s", hdev->name);
2343 
2344 	read_lock(&hci_task_lock);
2345 
2346 	while ((skb = skb_dequeue(&hdev->rx_q))) {
2347 		if (atomic_read(&hdev->promisc)) {
2348 			/* Send copy to the sockets */
2349 			hci_send_to_sock(hdev, skb, NULL);
2350 		}
2351 
2352 		if (test_bit(HCI_RAW, &hdev->flags)) {
2353 			kfree_skb(skb);
2354 			continue;
2355 		}
2356 
2357 		if (test_bit(HCI_INIT, &hdev->flags)) {
2358 			/* Don't process data packets in this state. */
2359 			switch (bt_cb(skb)->pkt_type) {
2360 			case HCI_ACLDATA_PKT:
2361 			case HCI_SCODATA_PKT:
2362 				kfree_skb(skb);
2363 				continue;
2364 			}
2365 		}
2366 
2367 		/* Process frame */
2368 		switch (bt_cb(skb)->pkt_type) {
2369 		case HCI_EVENT_PKT:
2370 			hci_event_packet(hdev, skb);
2371 			break;
2372 
2373 		case HCI_ACLDATA_PKT:
2374 			BT_DBG("%s ACL data packet", hdev->name);
2375 			hci_acldata_packet(hdev, skb);
2376 			break;
2377 
2378 		case HCI_SCODATA_PKT:
2379 			BT_DBG("%s SCO data packet", hdev->name);
2380 			hci_scodata_packet(hdev, skb);
2381 			break;
2382 
2383 		default:
2384 			kfree_skb(skb);
2385 			break;
2386 		}
2387 	}
2388 
2389 	read_unlock(&hci_task_lock);
2390 }
2391 
2392 static void hci_cmd_task(unsigned long arg)
2393 {
2394 	struct hci_dev *hdev = (struct hci_dev *) arg;
2395 	struct sk_buff *skb;
2396 
2397 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2398 
2399 	/* Send queued commands */
2400 	if (atomic_read(&hdev->cmd_cnt)) {
2401 		skb = skb_dequeue(&hdev->cmd_q);
2402 		if (!skb)
2403 			return;
2404 
2405 		kfree_skb(hdev->sent_cmd);
2406 
2407 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2408 		if (hdev->sent_cmd) {
2409 			atomic_dec(&hdev->cmd_cnt);
2410 			hci_send_frame(skb);
2411 			mod_timer(&hdev->cmd_timer,
2412 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2413 		} else {
2414 			skb_queue_head(&hdev->cmd_q, skb);
2415 			tasklet_schedule(&hdev->cmd_task);
2416 		}
2417 	}
2418 }
2419