/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

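/* Delay in milliseconds before an automatically powered-on controller is
 * switched back off again, unless user space claims it first (see
 * hci_del_off_timer()). */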
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
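/*
 * The caller must hold hci_req_lock(). The req() callback queues one or
 * more HCI commands; hci_req_complete() wakes this waiter once the
 * controller has answered, or schedule_timeout() expires first.
 */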
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
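	/* 0x7d00 = 32000 baseband slots * 0.625 ms = 20 s */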
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next  = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

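/*
 * HCIINQUIRY ioctl: flush the inquiry cache and run a fresh inquiry when
 * the cached results are stale (or a flush was explicitly requested),
 * then copy the cached responses out to user space.
 */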
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

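	/* ir.length is in units of 1.28 s; the 2000 ms budgeted per unit
	 * here is presumably rounded up to leave slack for command round
	 * trips. */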
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

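	/* For the MTU ioctls, dev_opt packs two 16-bit values: the packet
	 * count in the low half and the MTU in the high half (a layout this
	 * legacy ABI assumes to be little-endian). */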
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

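/*
 * Decide whether a newly created link key should be stored permanently.
 * Legacy (pre-SSP) keys are always kept and debug keys never are; for
 * SSP keys the decision depends on the authentication requirements the
 * two sides negotiated during pairing.
 */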
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither the local nor the remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;
	int err;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	hci_dev_lock_bh(hdev);

	if (hci_blacklist_lookup(hdev, bdaddr)) {
		err = -EEXIST;
		goto err;
	}

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto err;
	}

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	err = 0;

err:
	hci_dev_unlock_bh(hdev);
	return err;
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;
	int err = 0;

	hci_dev_lock_bh(hdev);

	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
		hci_blacklist_clear(hdev);
		goto done;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry) {
		err = -ENOENT;
		goto done;
	}

	list_del(&entry->list);
	kfree(entry);

done:
	hci_dev_unlock_bh(hdev);
	return err;
}

static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
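	/* The list is kept sorted by id, so the first position whose id does
	 * not match its index is a free slot (e.g. with hci0 and hci2
	 * registered, the new device becomes hci1). */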
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
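/*
 * Minimal usage sketch for a transport driver (illustrative only; the
 * my_* callbacks and the error handling are hypothetical):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus      = HCI_UART;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */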

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

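/*
 * Incrementally rebuild one HCI packet from a byte stream. A fresh skb
 * first collects the packet header; once the header is complete,
 * scb->expect is reset to the payload length it announces. Returns the
 * number of input bytes left unconsumed, or a negative errno.
 */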
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

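/*
 * Framed transports hand each fragment over together with its packet
 * type; types ACL (0x02), SCO (0x03) and event (0x04) map onto
 * reassembly slots 1..3 via "type - 1". A sketch of a driver RX path
 * (my_buf and len are illustrative):
 *
 *	ret = hci_recv_fragment(hdev, HCI_EVENT_PKT, my_buf, len);
 *	if (ret < 0)
 *		BT_ERR("reassembly failed");
 */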
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

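/*
 * Byte-stream transports (e.g. the H:4 UART protocol) prefix every frame
 * with a one-byte packet type indicator. Strip it here and feed the rest
 * through the shared stream reassembly slot.
 */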
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
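/* hci_handle_pack() folds the 12-bit connection handle and the 4 bits of
 * packet-boundary/broadcast flags into the single 16-bit handle field of
 * the ACL header. */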
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
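/*
 * Pick the connection of the given type with the fewest packets in
 * flight (c->sent) and grant it a fair share of the controller buffers:
 * quote = available buffers / number of ready connections, minimum 1.
 * For example, with hdev->acl_cnt == 8 and three ACL links holding
 * queued data, each scheduling round hands out a quota of 2.
 */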
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
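		/* (0xffff slots * 0.625 ms = 40.96 s, hence the 45 s guard) */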
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

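/*
 * cmd_cnt is a credit counter: it is decremented when a command is handed
 * to the driver and replenished from the Num_HCI_Command_Packets field of
 * Command Complete/Status events (handled in hci_event.c), so at most
 * cmd_cnt commands are ever in flight. sent_cmd keeps a clone of the last
 * command for hci_sent_cmd_data().
 */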
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}