xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 565d76cb)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI core. */
26 
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30 
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <net/sock.h>
46 
47 #include <asm/system.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50 
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 
54 #define AUTO_OFF_TIMEOUT 2000	/* milliseconds */
55 
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59 static void hci_notify(struct hci_dev *hdev, int event);
60 
61 static DEFINE_RWLOCK(hci_task_lock);
62 
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66 
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70 
71 /* HCI protocols */
72 #define HCI_MAX_PROTO	2	/* L2CAP and SCO */
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 
78 /* ---- HCI notifications ---- */
79 
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82 	return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84 
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89 
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
94 
95 /* ---- HCI requests ---- */
96 
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 {
99 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100 
101 	/* If this is the init phase, check whether the completed command matches
102 	 * the last init command; if not, just return.
103 	 */
104 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105 		return;
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		wake_up_interruptible(&hdev->req_wait_q);
111 	}
112 }
113 
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
117 
118 	if (hdev->req_status == HCI_REQ_PEND) {
119 		hdev->req_result = err;
120 		hdev->req_status = HCI_REQ_CANCELED;
121 		wake_up_interruptible(&hdev->req_wait_q);
122 	}
123 }
124 
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 					unsigned long opt, __u32 timeout)
128 {
129 	DECLARE_WAITQUEUE(wait, current);
130 	int err = 0;
131 
132 	BT_DBG("%s start", hdev->name);
133 
134 	hdev->req_status = HCI_REQ_PEND;
135 
136 	add_wait_queue(&hdev->req_wait_q, &wait);
137 	set_current_state(TASK_INTERRUPTIBLE);
138 
139 	req(hdev, opt);
140 	schedule_timeout(timeout);
141 
142 	remove_wait_queue(&hdev->req_wait_q, &wait);
143 
144 	if (signal_pending(current))
145 		return -EINTR;
146 
147 	switch (hdev->req_status) {
148 	case HCI_REQ_DONE:
149 		err = -bt_err(hdev->req_result);
150 		break;
151 
152 	case HCI_REQ_CANCELED:
153 		err = -hdev->req_result;
154 		break;
155 
156 	default:
157 		err = -ETIMEDOUT;
158 		break;
159 	}
160 
161 	hdev->req_status = hdev->req_result = 0;
162 
163 	BT_DBG("%s end: err %d", hdev->name, err);
164 
165 	return err;
166 }
167 
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 					unsigned long opt, __u32 timeout)
170 {
171 	int ret;
172 
173 	if (!test_bit(HCI_UP, &hdev->flags))
174 		return -ENETDOWN;
175 
176 	/* Serialize all requests */
177 	hci_req_lock(hdev);
178 	ret = __hci_request(hdev, req, opt, timeout);
179 	hci_req_unlock(hdev);
180 
181 	return ret;
182 }
183 
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 	BT_DBG("%s %ld", hdev->name, opt);
187 
188 	/* Reset device */
189 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 }
191 
192 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
193 {
194 	struct hci_cp_delete_stored_link_key cp;
195 	struct sk_buff *skb;
196 	__le16 param;
197 	__u8 flt_type;
198 
199 	BT_DBG("%s %ld", hdev->name, opt);
200 
201 	/* Driver initialization */
202 
203 	/* Special commands */
204 	while ((skb = skb_dequeue(&hdev->driver_init))) {
205 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
206 		skb->dev = (void *) hdev;
207 
208 		skb_queue_tail(&hdev->cmd_q, skb);
209 		tasklet_schedule(&hdev->cmd_task);
210 	}
211 	skb_queue_purge(&hdev->driver_init);
212 
213 	/* Mandatory initialization */
214 
215 	/* Reset */
216 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
217 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
218 
219 	/* Read Local Supported Features */
220 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
221 
222 	/* Read Local Version */
223 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
224 
225 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
226 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
227 
228 #if 0
229 	/* Host buffer size */
230 	{
231 		struct hci_cp_host_buffer_size cp;
232 		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
233 		cp.sco_mtu = HCI_MAX_SCO_SIZE;
234 		cp.acl_max_pkt = cpu_to_le16(0xffff);
235 		cp.sco_max_pkt = cpu_to_le16(0xffff);
236 		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
237 	}
238 #endif
239 
240 	/* Read BD Address */
241 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
242 
243 	/* Read Class of Device */
244 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
245 
246 	/* Read Local Name */
247 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
248 
249 	/* Read Voice Setting */
250 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
251 
252 	/* Optional initialization */
253 
254 	/* Clear Event Filters */
255 	flt_type = HCI_FLT_CLEAR_ALL;
256 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
257 
258 	/* Connection accept timeout: 0x7d00 * 0.625 ms = 20 seconds */
259 	param = cpu_to_le16(0x7d00);
260 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
261 
262 	bacpy(&cp.bdaddr, BDADDR_ANY);
263 	cp.delete_all = 1;
264 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
265 }
266 
267 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
268 {
269 	BT_DBG("%s", hdev->name);
270 
271 	/* Read LE buffer size */
272 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
273 }
274 
275 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
276 {
277 	__u8 scan = opt;
278 
279 	BT_DBG("%s %x", hdev->name, scan);
280 
281 	/* Inquiry and Page scans */
282 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
283 }
284 
285 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
286 {
287 	__u8 auth = opt;
288 
289 	BT_DBG("%s %x", hdev->name, auth);
290 
291 	/* Authentication */
292 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
293 }
294 
295 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 	__u8 encrypt = opt;
298 
299 	BT_DBG("%s %x", hdev->name, encrypt);
300 
301 	/* Encryption */
302 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
303 }
304 
305 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
306 {
307 	__le16 policy = cpu_to_le16(opt);
308 
309 	BT_DBG("%s %x", hdev->name, policy);
310 
311 	/* Default link policy */
312 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
313 }
314 
315 /* Get HCI device by index.
316  * Device is held on return. */
317 struct hci_dev *hci_dev_get(int index)
318 {
319 	struct hci_dev *hdev = NULL;
320 	struct list_head *p;
321 
322 	BT_DBG("%d", index);
323 
324 	if (index < 0)
325 		return NULL;
326 
327 	read_lock(&hci_dev_list_lock);
328 	list_for_each(p, &hci_dev_list) {
329 		struct hci_dev *d = list_entry(p, struct hci_dev, list);
330 		if (d->id == index) {
331 			hdev = hci_dev_hold(d);
332 			break;
333 		}
334 	}
335 	read_unlock(&hci_dev_list_lock);
336 	return hdev;
337 }
338 
339 /* ---- Inquiry support ---- */
340 static void inquiry_cache_flush(struct hci_dev *hdev)
341 {
342 	struct inquiry_cache *cache = &hdev->inq_cache;
343 	struct inquiry_entry *next  = cache->list, *e;
344 
345 	BT_DBG("cache %p", cache);
346 
347 	cache->list = NULL;
348 	while ((e = next)) {
349 		next = e->next;
350 		kfree(e);
351 	}
352 }
353 
354 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
355 {
356 	struct inquiry_cache *cache = &hdev->inq_cache;
357 	struct inquiry_entry *e;
358 
359 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
360 
361 	for (e = cache->list; e; e = e->next)
362 		if (!bacmp(&e->data.bdaddr, bdaddr))
363 			break;
364 	return e;
365 }
366 
367 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
368 {
369 	struct inquiry_cache *cache = &hdev->inq_cache;
370 	struct inquiry_entry *ie;
371 
372 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
373 
374 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
375 	if (!ie) {
376 		/* Entry not in the cache. Add a new one. */
377 		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
378 		if (!ie)
379 			return;
380 
381 		ie->next = cache->list;
382 		cache->list = ie;
383 	}
384 
385 	memcpy(&ie->data, data, sizeof(*data));
386 	ie->timestamp = jiffies;
387 	cache->timestamp = jiffies;
388 }
389 
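/* Copy up to num cached inquiry results into buf as struct inquiry_info
 * entries; returns the number of entries copied. */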
390 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
391 {
392 	struct inquiry_cache *cache = &hdev->inq_cache;
393 	struct inquiry_info *info = (struct inquiry_info *) buf;
394 	struct inquiry_entry *e;
395 	int copied = 0;
396 
397 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
398 		struct inquiry_data *data = &e->data;
399 		bacpy(&info->bdaddr, &data->bdaddr);
400 		info->pscan_rep_mode	= data->pscan_rep_mode;
401 		info->pscan_period_mode	= data->pscan_period_mode;
402 		info->pscan_mode	= data->pscan_mode;
403 		memcpy(info->dev_class, data->dev_class, 3);
404 		info->clock_offset	= data->clock_offset;
405 		info++;
406 	}
407 
408 	BT_DBG("cache %p, copied %d", cache, copied);
409 	return copied;
410 }
411 
412 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
413 {
414 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
415 	struct hci_cp_inquiry cp;
416 
417 	BT_DBG("%s", hdev->name);
418 
419 	if (test_bit(HCI_INQUIRY, &hdev->flags))
420 		return;
421 
422 	/* Start Inquiry */
423 	memcpy(&cp.lap, &ir->lap, 3);
424 	cp.length  = ir->length;
425 	cp.num_rsp = ir->num_rsp;
426 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
427 }
428 
429 int hci_inquiry(void __user *arg)
430 {
431 	__u8 __user *ptr = arg;
432 	struct hci_inquiry_req ir;
433 	struct hci_dev *hdev;
434 	int err = 0, do_inquiry = 0, max_rsp;
435 	long timeo;
436 	__u8 *buf;
437 
438 	if (copy_from_user(&ir, ptr, sizeof(ir)))
439 		return -EFAULT;
440 
441 	hdev = hci_dev_get(ir.dev_id);
442 	if (!hdev)
443 		return -ENODEV;
444 
445 	hci_dev_lock_bh(hdev);
446 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
447 				inquiry_cache_empty(hdev) ||
448 				ir.flags & IREQ_CACHE_FLUSH) {
449 		inquiry_cache_flush(hdev);
450 		do_inquiry = 1;
451 	}
452 	hci_dev_unlock_bh(hdev);
453 
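	/* Inquiry_Length is in 1.28 s units; allow roughly 2 s per unit for the request to finish */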
454 	timeo = ir.length * msecs_to_jiffies(2000);
455 
456 	if (do_inquiry) {
457 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
458 		if (err < 0)
459 			goto done;
460 	}
461 
462 	/* For an unlimited number of responses, use a buffer with 255 entries */
463 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
464 
465 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and
466 	 * then copy it to user space.
467 	 */
468 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
469 	if (!buf) {
470 		err = -ENOMEM;
471 		goto done;
472 	}
473 
474 	hci_dev_lock_bh(hdev);
475 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
476 	hci_dev_unlock_bh(hdev);
477 
478 	BT_DBG("num_rsp %d", ir.num_rsp);
479 
480 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
481 		ptr += sizeof(ir);
482 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
483 					ir.num_rsp))
484 			err = -EFAULT;
485 	} else
486 		err = -EFAULT;
487 
488 	kfree(buf);
489 
490 done:
491 	hci_dev_put(hdev);
492 	return err;
493 }
494 
495 /* ---- HCI ioctl helpers ---- */
496 
497 int hci_dev_open(__u16 dev)
498 {
499 	struct hci_dev *hdev;
500 	int ret = 0;
501 
502 	hdev = hci_dev_get(dev);
503 	if (!hdev)
504 		return -ENODEV;
505 
506 	BT_DBG("%s %p", hdev->name, hdev);
507 
508 	hci_req_lock(hdev);
509 
510 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
511 		ret = -ERFKILL;
512 		goto done;
513 	}
514 
515 	if (test_bit(HCI_UP, &hdev->flags)) {
516 		ret = -EALREADY;
517 		goto done;
518 	}
519 
520 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
521 		set_bit(HCI_RAW, &hdev->flags);
522 
523 	/* Treat all non-BR/EDR controllers as raw devices for now */
524 	if (hdev->dev_type != HCI_BREDR)
525 		set_bit(HCI_RAW, &hdev->flags);
526 
527 	if (hdev->open(hdev)) {
528 		ret = -EIO;
529 		goto done;
530 	}
531 
532 	if (!test_bit(HCI_RAW, &hdev->flags)) {
533 		atomic_set(&hdev->cmd_cnt, 1);
534 		set_bit(HCI_INIT, &hdev->flags);
535 		hdev->init_last_cmd = 0;
536 
537 		ret = __hci_request(hdev, hci_init_req, 0,
538 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
539 
540 		if (lmp_le_capable(hdev))
541 			ret = __hci_request(hdev, hci_le_init_req, 0,
542 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
543 
544 		clear_bit(HCI_INIT, &hdev->flags);
545 	}
546 
547 	if (!ret) {
548 		hci_dev_hold(hdev);
549 		set_bit(HCI_UP, &hdev->flags);
550 		hci_notify(hdev, HCI_DEV_UP);
551 		if (!test_bit(HCI_SETUP, &hdev->flags))
552 			mgmt_powered(hdev->id, 1);
553 	} else {
554 		/* Init failed, cleanup */
555 		tasklet_kill(&hdev->rx_task);
556 		tasklet_kill(&hdev->tx_task);
557 		tasklet_kill(&hdev->cmd_task);
558 
559 		skb_queue_purge(&hdev->cmd_q);
560 		skb_queue_purge(&hdev->rx_q);
561 
562 		if (hdev->flush)
563 			hdev->flush(hdev);
564 
565 		if (hdev->sent_cmd) {
566 			kfree_skb(hdev->sent_cmd);
567 			hdev->sent_cmd = NULL;
568 		}
569 
570 		hdev->close(hdev);
571 		hdev->flags = 0;
572 	}
573 
574 done:
575 	hci_req_unlock(hdev);
576 	hci_dev_put(hdev);
577 	return ret;
578 }
579 
580 static int hci_dev_do_close(struct hci_dev *hdev)
581 {
582 	BT_DBG("%s %p", hdev->name, hdev);
583 
584 	hci_req_cancel(hdev, ENODEV);
585 	hci_req_lock(hdev);
586 
587 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
588 		hci_req_unlock(hdev);
589 		return 0;
590 	}
591 
592 	/* Kill RX and TX tasks */
593 	tasklet_kill(&hdev->rx_task);
594 	tasklet_kill(&hdev->tx_task);
595 
596 	hci_dev_lock_bh(hdev);
597 	inquiry_cache_flush(hdev);
598 	hci_conn_hash_flush(hdev);
599 	hci_dev_unlock_bh(hdev);
600 
601 	hci_notify(hdev, HCI_DEV_DOWN);
602 
603 	if (hdev->flush)
604 		hdev->flush(hdev);
605 
606 	/* Reset device */
607 	skb_queue_purge(&hdev->cmd_q);
608 	atomic_set(&hdev->cmd_cnt, 1);
609 	if (!test_bit(HCI_RAW, &hdev->flags)) {
610 		set_bit(HCI_INIT, &hdev->flags);
611 		__hci_request(hdev, hci_reset_req, 0,
612 					msecs_to_jiffies(250));
613 		clear_bit(HCI_INIT, &hdev->flags);
614 	}
615 
616 	/* Kill cmd task */
617 	tasklet_kill(&hdev->cmd_task);
618 
619 	/* Drop queues */
620 	skb_queue_purge(&hdev->rx_q);
621 	skb_queue_purge(&hdev->cmd_q);
622 	skb_queue_purge(&hdev->raw_q);
623 
624 	/* Drop last sent command */
625 	if (hdev->sent_cmd) {
626 		del_timer_sync(&hdev->cmd_timer);
627 		kfree_skb(hdev->sent_cmd);
628 		hdev->sent_cmd = NULL;
629 	}
630 
631 	/* After this point our queues are empty
632 	 * and no tasks are scheduled. */
633 	hdev->close(hdev);
634 
635 	mgmt_powered(hdev->id, 0);
636 
637 	/* Clear flags */
638 	hdev->flags = 0;
639 
640 	hci_req_unlock(hdev);
641 
642 	hci_dev_put(hdev);
643 	return 0;
644 }
645 
646 int hci_dev_close(__u16 dev)
647 {
648 	struct hci_dev *hdev;
649 	int err;
650 
651 	hdev = hci_dev_get(dev);
652 	if (!hdev)
653 		return -ENODEV;
654 	err = hci_dev_do_close(hdev);
655 	hci_dev_put(hdev);
656 	return err;
657 }
658 
659 int hci_dev_reset(__u16 dev)
660 {
661 	struct hci_dev *hdev;
662 	int ret = 0;
663 
664 	hdev = hci_dev_get(dev);
665 	if (!hdev)
666 		return -ENODEV;
667 
668 	hci_req_lock(hdev);
669 	tasklet_disable(&hdev->tx_task);
670 
671 	if (!test_bit(HCI_UP, &hdev->flags))
672 		goto done;
673 
674 	/* Drop queues */
675 	skb_queue_purge(&hdev->rx_q);
676 	skb_queue_purge(&hdev->cmd_q);
677 
678 	hci_dev_lock_bh(hdev);
679 	inquiry_cache_flush(hdev);
680 	hci_conn_hash_flush(hdev);
681 	hci_dev_unlock_bh(hdev);
682 
683 	if (hdev->flush)
684 		hdev->flush(hdev);
685 
686 	atomic_set(&hdev->cmd_cnt, 1);
687 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
688 
689 	if (!test_bit(HCI_RAW, &hdev->flags))
690 		ret = __hci_request(hdev, hci_reset_req, 0,
691 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 
693 done:
694 	tasklet_enable(&hdev->tx_task);
695 	hci_req_unlock(hdev);
696 	hci_dev_put(hdev);
697 	return ret;
698 }
699 
700 int hci_dev_reset_stat(__u16 dev)
701 {
702 	struct hci_dev *hdev;
703 	int ret = 0;
704 
705 	hdev = hci_dev_get(dev);
706 	if (!hdev)
707 		return -ENODEV;
708 
709 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
710 
711 	hci_dev_put(hdev);
712 
713 	return ret;
714 }
715 
716 int hci_dev_cmd(unsigned int cmd, void __user *arg)
717 {
718 	struct hci_dev *hdev;
719 	struct hci_dev_req dr;
720 	int err = 0;
721 
722 	if (copy_from_user(&dr, arg, sizeof(dr)))
723 		return -EFAULT;
724 
725 	hdev = hci_dev_get(dr.dev_id);
726 	if (!hdev)
727 		return -ENODEV;
728 
729 	switch (cmd) {
730 	case HCISETAUTH:
731 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
732 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
733 		break;
734 
735 	case HCISETENCRYPT:
736 		if (!lmp_encrypt_capable(hdev)) {
737 			err = -EOPNOTSUPP;
738 			break;
739 		}
740 
741 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
742 			/* Auth must be enabled first */
743 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
744 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
745 			if (err)
746 				break;
747 		}
748 
749 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
750 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
751 		break;
752 
753 	case HCISETSCAN:
754 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
755 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
756 		break;
757 
758 	case HCISETLINKPOL:
759 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
760 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
761 		break;
762 
763 	case HCISETLINKMODE:
764 		hdev->link_mode = ((__u16) dr.dev_opt) &
765 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
766 		break;
767 
768 	case HCISETPTYPE:
769 		hdev->pkt_type = (__u16) dr.dev_opt;
770 		break;
771 
772 	case HCISETACLMTU:
773 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
774 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
775 		break;
776 
777 	case HCISETSCOMTU:
778 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
779 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
780 		break;
781 
782 	default:
783 		err = -EINVAL;
784 		break;
785 	}
786 
787 	hci_dev_put(hdev);
788 	return err;
789 }
790 
791 int hci_get_dev_list(void __user *arg)
792 {
793 	struct hci_dev_list_req *dl;
794 	struct hci_dev_req *dr;
795 	struct list_head *p;
796 	int n = 0, size, err;
797 	__u16 dev_num;
798 
799 	if (get_user(dev_num, (__u16 __user *) arg))
800 		return -EFAULT;
801 
802 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
803 		return -EINVAL;
804 
805 	size = sizeof(*dl) + dev_num * sizeof(*dr);
806 
807 	dl = kzalloc(size, GFP_KERNEL);
808 	if (!dl)
809 		return -ENOMEM;
810 
811 	dr = dl->dev_req;
812 
813 	read_lock_bh(&hci_dev_list_lock);
814 	list_for_each(p, &hci_dev_list) {
815 		struct hci_dev *hdev;
816 
817 		hdev = list_entry(p, struct hci_dev, list);
818 
819 		hci_del_off_timer(hdev);
820 
821 		if (!test_bit(HCI_MGMT, &hdev->flags))
822 			set_bit(HCI_PAIRABLE, &hdev->flags);
823 
824 		(dr + n)->dev_id  = hdev->id;
825 		(dr + n)->dev_opt = hdev->flags;
826 
827 		if (++n >= dev_num)
828 			break;
829 	}
830 	read_unlock_bh(&hci_dev_list_lock);
831 
832 	dl->dev_num = n;
833 	size = sizeof(*dl) + n * sizeof(*dr);
834 
835 	err = copy_to_user(arg, dl, size);
836 	kfree(dl);
837 
838 	return err ? -EFAULT : 0;
839 }
840 
841 int hci_get_dev_info(void __user *arg)
842 {
843 	struct hci_dev *hdev;
844 	struct hci_dev_info di;
845 	int err = 0;
846 
847 	if (copy_from_user(&di, arg, sizeof(di)))
848 		return -EFAULT;
849 
850 	hdev = hci_dev_get(di.dev_id);
851 	if (!hdev)
852 		return -ENODEV;
853 
854 	hci_del_off_timer(hdev);
855 
856 	if (!test_bit(HCI_MGMT, &hdev->flags))
857 		set_bit(HCI_PAIRABLE, &hdev->flags);
858 
859 	strcpy(di.name, hdev->name);
860 	di.bdaddr   = hdev->bdaddr;
861 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
862 	di.flags    = hdev->flags;
863 	di.pkt_type = hdev->pkt_type;
864 	di.acl_mtu  = hdev->acl_mtu;
865 	di.acl_pkts = hdev->acl_pkts;
866 	di.sco_mtu  = hdev->sco_mtu;
867 	di.sco_pkts = hdev->sco_pkts;
868 	di.link_policy = hdev->link_policy;
869 	di.link_mode   = hdev->link_mode;
870 
871 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
872 	memcpy(&di.features, &hdev->features, sizeof(di.features));
873 
874 	if (copy_to_user(arg, &di, sizeof(di)))
875 		err = -EFAULT;
876 
877 	hci_dev_put(hdev);
878 
879 	return err;
880 }
881 
882 /* ---- Interface to HCI drivers ---- */
883 
884 static int hci_rfkill_set_block(void *data, bool blocked)
885 {
886 	struct hci_dev *hdev = data;
887 
888 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
889 
890 	if (!blocked)
891 		return 0;
892 
893 	hci_dev_do_close(hdev);
894 
895 	return 0;
896 }
897 
898 static const struct rfkill_ops hci_rfkill_ops = {
899 	.set_block = hci_rfkill_set_block,
900 };
901 
902 /* Alloc HCI device */
903 struct hci_dev *hci_alloc_dev(void)
904 {
905 	struct hci_dev *hdev;
906 
907 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
908 	if (!hdev)
909 		return NULL;
910 
911 	skb_queue_head_init(&hdev->driver_init);
912 
913 	return hdev;
914 }
915 EXPORT_SYMBOL(hci_alloc_dev);
916 
917 /* Free HCI device */
918 void hci_free_dev(struct hci_dev *hdev)
919 {
920 	skb_queue_purge(&hdev->driver_init);
921 
922 	/* will free via device release */
923 	put_device(&hdev->dev);
924 }
925 EXPORT_SYMBOL(hci_free_dev);
926 
927 static void hci_power_on(struct work_struct *work)
928 {
929 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
930 
931 	BT_DBG("%s", hdev->name);
932 
933 	if (hci_dev_open(hdev->id) < 0)
934 		return;
935 
936 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
937 		mod_timer(&hdev->off_timer,
938 				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
939 
940 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
941 		mgmt_index_added(hdev->id);
942 }
943 
944 static void hci_power_off(struct work_struct *work)
945 {
946 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
947 
948 	BT_DBG("%s", hdev->name);
949 
950 	hci_dev_close(hdev->id);
951 }
952 
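/* Auto-off timer callback: power the controller back down if user space
 * never claimed it after registration. */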
953 static void hci_auto_off(unsigned long data)
954 {
955 	struct hci_dev *hdev = (struct hci_dev *) data;
956 
957 	BT_DBG("%s", hdev->name);
958 
959 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
960 
961 	queue_work(hdev->workqueue, &hdev->power_off);
962 }
963 
964 void hci_del_off_timer(struct hci_dev *hdev)
965 {
966 	BT_DBG("%s", hdev->name);
967 
968 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
969 	del_timer(&hdev->off_timer);
970 }
971 
972 int hci_uuids_clear(struct hci_dev *hdev)
973 {
974 	struct list_head *p, *n;
975 
976 	list_for_each_safe(p, n, &hdev->uuids) {
977 		struct bt_uuid *uuid;
978 
979 		uuid = list_entry(p, struct bt_uuid, list);
980 
981 		list_del(p);
982 		kfree(uuid);
983 	}
984 
985 	return 0;
986 }
987 
988 int hci_link_keys_clear(struct hci_dev *hdev)
989 {
990 	struct list_head *p, *n;
991 
992 	list_for_each_safe(p, n, &hdev->link_keys) {
993 		struct link_key *key;
994 
995 		key = list_entry(p, struct link_key, list);
996 
997 		list_del(p);
998 		kfree(key);
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1005 {
1006 	struct list_head *p;
1007 
1008 	list_for_each(p, &hdev->link_keys) {
1009 		struct link_key *k;
1010 
1011 		k = list_entry(p, struct link_key, list);
1012 
1013 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1014 			return k;
1015 	}
1016 
1017 	return NULL;
1018 }
1019 
1020 int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1021 						u8 *val, u8 type, u8 pin_len)
1022 {
1023 	struct link_key *key, *old_key;
1024 	u8 old_key_type;
1025 
1026 	old_key = hci_find_link_key(hdev, bdaddr);
1027 	if (old_key) {
1028 		old_key_type = old_key->type;
1029 		key = old_key;
1030 	} else {
1031 		old_key_type = 0xff;
1032 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1033 		if (!key)
1034 			return -ENOMEM;
1035 		list_add(&key->list, &hdev->link_keys);
1036 	}
1037 
1038 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1039 
1040 	bacpy(&key->bdaddr, bdaddr);
1041 	memcpy(key->val, val, 16);
1042 	key->type = type;
1043 	key->pin_len = pin_len;
1044 
1045 	if (new_key)
1046 		mgmt_new_key(hdev->id, key, old_key_type);
1047 
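	/* A Changed Combination Key (type 0x06) keeps the type of the key it replaces */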
1048 	if (type == 0x06)
1049 		key->type = old_key_type;
1050 
1051 	return 0;
1052 }
1053 
1054 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1055 {
1056 	struct link_key *key;
1057 
1058 	key = hci_find_link_key(hdev, bdaddr);
1059 	if (!key)
1060 		return -ENOENT;
1061 
1062 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1063 
1064 	list_del(&key->list);
1065 	kfree(key);
1066 
1067 	return 0;
1068 }
1069 
1070 /* HCI command timeout: the controller never answered the last command, so restore the command credit and kick the command task */
1071 static void hci_cmd_timer(unsigned long arg)
1072 {
1073 	struct hci_dev *hdev = (void *) arg;
1074 
1075 	BT_ERR("%s command tx timeout", hdev->name);
1076 	atomic_set(&hdev->cmd_cnt, 1);
1077 	tasklet_schedule(&hdev->cmd_task);
1078 }
1079 
1080 /* Register HCI device */
1081 int hci_register_dev(struct hci_dev *hdev)
1082 {
1083 	struct list_head *head = &hci_dev_list, *p;
1084 	int i, id = 0;
1085 
1086 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1087 						hdev->bus, hdev->owner);
1088 
1089 	if (!hdev->open || !hdev->close || !hdev->destruct)
1090 		return -EINVAL;
1091 
1092 	write_lock_bh(&hci_dev_list_lock);
1093 
1094 	/* Find first available device id */
1095 	list_for_each(p, &hci_dev_list) {
1096 		if (list_entry(p, struct hci_dev, list)->id != id)
1097 			break;
1098 		head = p; id++;
1099 	}
1100 
1101 	sprintf(hdev->name, "hci%d", id);
1102 	hdev->id = id;
1103 	list_add(&hdev->list, head);
1104 
1105 	atomic_set(&hdev->refcnt, 1);
1106 	spin_lock_init(&hdev->lock);
1107 
1108 	hdev->flags = 0;
1109 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1110 	hdev->esco_type = (ESCO_HV1);
1111 	hdev->link_mode = (HCI_LM_ACCEPT);
1112 	hdev->io_capability = 0x03; /* No Input No Output */
1113 
1114 	hdev->idle_timeout = 0;
1115 	hdev->sniff_max_interval = 800;	/* 0.625 ms slots: 500 ms */
1116 	hdev->sniff_min_interval = 80;	/* 0.625 ms slots: 50 ms */
1117 
1118 	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1119 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1120 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1121 
1122 	skb_queue_head_init(&hdev->rx_q);
1123 	skb_queue_head_init(&hdev->cmd_q);
1124 	skb_queue_head_init(&hdev->raw_q);
1125 
1126 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1127 
1128 	for (i = 0; i < NUM_REASSEMBLY; i++)
1129 		hdev->reassembly[i] = NULL;
1130 
1131 	init_waitqueue_head(&hdev->req_wait_q);
1132 	mutex_init(&hdev->req_lock);
1133 
1134 	inquiry_cache_init(hdev);
1135 
1136 	hci_conn_hash_init(hdev);
1137 
1138 	INIT_LIST_HEAD(&hdev->blacklist);
1139 
1140 	INIT_LIST_HEAD(&hdev->uuids);
1141 
1142 	INIT_LIST_HEAD(&hdev->link_keys);
1143 
1144 	INIT_WORK(&hdev->power_on, hci_power_on);
1145 	INIT_WORK(&hdev->power_off, hci_power_off);
1146 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1147 
1148 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1149 
1150 	atomic_set(&hdev->promisc, 0);
1151 
1152 	write_unlock_bh(&hci_dev_list_lock);
1153 
1154 	hdev->workqueue = create_singlethread_workqueue(hdev->name);
1155 	if (!hdev->workqueue)
1156 		goto nomem;
1157 
1158 	hci_register_sysfs(hdev);
1159 
1160 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1161 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1162 	if (hdev->rfkill) {
1163 		if (rfkill_register(hdev->rfkill) < 0) {
1164 			rfkill_destroy(hdev->rfkill);
1165 			hdev->rfkill = NULL;
1166 		}
1167 	}
1168 
1169 	set_bit(HCI_AUTO_OFF, &hdev->flags);
1170 	set_bit(HCI_SETUP, &hdev->flags);
1171 	queue_work(hdev->workqueue, &hdev->power_on);
1172 
1173 	hci_notify(hdev, HCI_DEV_REG);
1174 
1175 	return id;
1176 
1177 nomem:
1178 	write_lock_bh(&hci_dev_list_lock);
1179 	list_del(&hdev->list);
1180 	write_unlock_bh(&hci_dev_list_lock);
1181 
1182 	return -ENOMEM;
1183 }
1184 EXPORT_SYMBOL(hci_register_dev);
1185 
1186 /* Unregister HCI device */
1187 int hci_unregister_dev(struct hci_dev *hdev)
1188 {
1189 	int i;
1190 
1191 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1192 
1193 	write_lock_bh(&hci_dev_list_lock);
1194 	list_del(&hdev->list);
1195 	write_unlock_bh(&hci_dev_list_lock);
1196 
1197 	hci_dev_do_close(hdev);
1198 
1199 	for (i = 0; i < NUM_REASSEMBLY; i++)
1200 		kfree_skb(hdev->reassembly[i]);
1201 
1202 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1203 					!test_bit(HCI_SETUP, &hdev->flags))
1204 		mgmt_index_removed(hdev->id);
1205 
1206 	hci_notify(hdev, HCI_DEV_UNREG);
1207 
1208 	if (hdev->rfkill) {
1209 		rfkill_unregister(hdev->rfkill);
1210 		rfkill_destroy(hdev->rfkill);
1211 	}
1212 
1213 	hci_unregister_sysfs(hdev);
1214 
1215 	hci_del_off_timer(hdev);
1216 
1217 	destroy_workqueue(hdev->workqueue);
1218 
1219 	hci_dev_lock_bh(hdev);
1220 	hci_blacklist_clear(hdev);
1221 	hci_uuids_clear(hdev);
1222 	hci_link_keys_clear(hdev);
1223 	hci_dev_unlock_bh(hdev);
1224 
1225 	__hci_dev_put(hdev);
1226 
1227 	return 0;
1228 }
1229 EXPORT_SYMBOL(hci_unregister_dev);
1230 
1231 /* Suspend HCI device */
1232 int hci_suspend_dev(struct hci_dev *hdev)
1233 {
1234 	hci_notify(hdev, HCI_DEV_SUSPEND);
1235 	return 0;
1236 }
1237 EXPORT_SYMBOL(hci_suspend_dev);
1238 
1239 /* Resume HCI device */
1240 int hci_resume_dev(struct hci_dev *hdev)
1241 {
1242 	hci_notify(hdev, HCI_DEV_RESUME);
1243 	return 0;
1244 }
1245 EXPORT_SYMBOL(hci_resume_dev);
1246 
1247 /* Receive frame from HCI drivers */
1248 int hci_recv_frame(struct sk_buff *skb)
1249 {
1250 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1251 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1252 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1253 		kfree_skb(skb);
1254 		return -ENXIO;
1255 	}
1256 
1257 	/* Incoming skb */
1258 	bt_cb(skb)->incoming = 1;
1259 
1260 	/* Time stamp */
1261 	__net_timestamp(skb);
1262 
1263 	/* Queue frame for rx task */
1264 	skb_queue_tail(&hdev->rx_q, skb);
1265 	tasklet_schedule(&hdev->rx_task);
1266 
1267 	return 0;
1268 }
1269 EXPORT_SYMBOL(hci_recv_frame);
1270 
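/* Reassemble one HCI packet of the given type into hdev->reassembly[index].
 * Returns the number of input bytes left unconsumed, or a negative error. */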
1271 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1272 			  int count, __u8 index, gfp_t gfp_mask)
1273 {
1274 	int len = 0;
1275 	int hlen = 0;
1276 	int remain = count;
1277 	struct sk_buff *skb;
1278 	struct bt_skb_cb *scb;
1279 
1280 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1281 				index >= NUM_REASSEMBLY)
1282 		return -EILSEQ;
1283 
1284 	skb = hdev->reassembly[index];
1285 
1286 	if (!skb) {
1287 		switch (type) {
1288 		case HCI_ACLDATA_PKT:
1289 			len = HCI_MAX_FRAME_SIZE;
1290 			hlen = HCI_ACL_HDR_SIZE;
1291 			break;
1292 		case HCI_EVENT_PKT:
1293 			len = HCI_MAX_EVENT_SIZE;
1294 			hlen = HCI_EVENT_HDR_SIZE;
1295 			break;
1296 		case HCI_SCODATA_PKT:
1297 			len = HCI_MAX_SCO_SIZE;
1298 			hlen = HCI_SCO_HDR_SIZE;
1299 			break;
1300 		}
1301 
1302 		skb = bt_skb_alloc(len, gfp_mask);
1303 		if (!skb)
1304 			return -ENOMEM;
1305 
1306 		scb = (void *) skb->cb;
1307 		scb->expect = hlen;
1308 		scb->pkt_type = type;
1309 
1310 		skb->dev = (void *) hdev;
1311 		hdev->reassembly[index] = skb;
1312 	}
1313 
1314 	while (count) {
1315 		scb = (void *) skb->cb;
1316 		len = min(scb->expect, (__u16)count);
1317 
1318 		memcpy(skb_put(skb, len), data, len);
1319 
1320 		count -= len;
1321 		data += len;
1322 		scb->expect -= len;
1323 		remain = count;
1324 
1325 		switch (type) {
1326 		case HCI_EVENT_PKT:
1327 			if (skb->len == HCI_EVENT_HDR_SIZE) {
1328 				struct hci_event_hdr *h = hci_event_hdr(skb);
1329 				scb->expect = h->plen;
1330 
1331 				if (skb_tailroom(skb) < scb->expect) {
1332 					kfree_skb(skb);
1333 					hdev->reassembly[index] = NULL;
1334 					return -ENOMEM;
1335 				}
1336 			}
1337 			break;
1338 
1339 		case HCI_ACLDATA_PKT:
1340 			if (skb->len  == HCI_ACL_HDR_SIZE) {
1341 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
1342 				scb->expect = __le16_to_cpu(h->dlen);
1343 
1344 				if (skb_tailroom(skb) < scb->expect) {
1345 					kfree_skb(skb);
1346 					hdev->reassembly[index] = NULL;
1347 					return -ENOMEM;
1348 				}
1349 			}
1350 			break;
1351 
1352 		case HCI_SCODATA_PKT:
1353 			if (skb->len == HCI_SCO_HDR_SIZE) {
1354 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
1355 				scb->expect = h->dlen;
1356 
1357 				if (skb_tailroom(skb) < scb->expect) {
1358 					kfree_skb(skb);
1359 					hdev->reassembly[index] = NULL;
1360 					return -ENOMEM;
1361 				}
1362 			}
1363 			break;
1364 		}
1365 
1366 		if (scb->expect == 0) {
1367 			/* Complete frame */
1368 
1369 			bt_cb(skb)->pkt_type = type;
1370 			hci_recv_frame(skb);
1371 
1372 			hdev->reassembly[index] = NULL;
1373 			return remain;
1374 		}
1375 	}
1376 
1377 	return remain;
1378 }
1379 
1380 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1381 {
1382 	int rem = 0;
1383 
1384 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1385 		return -EILSEQ;
1386 
1387 	while (count) {
1388 		rem = hci_reassembly(hdev, type, data, count,
1389 						type - 1, GFP_ATOMIC);
1390 		if (rem < 0)
1391 			return rem;
1392 
1393 		data += (count - rem);
1394 		count = rem;
1395 	}
1396 
1397 	return rem;
1398 }
1399 EXPORT_SYMBOL(hci_recv_fragment);
1400 
1401 #define STREAM_REASSEMBLY 0	/* reassembly slot used for stream transports */
1402 
1403 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1404 {
1405 	int type;
1406 	int rem = 0;
1407 
1408 	while (count) {
1409 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1410 
1411 		if (!skb) {
1412 			struct { char type; } *pkt;
1413 
1414 			/* Start of the frame */
1415 			pkt = data;
1416 			type = pkt->type;
1417 
1418 			data++;
1419 			count--;
1420 		} else
1421 			type = bt_cb(skb)->pkt_type;
1422 
1423 		rem = hci_reassembly(hdev, type, data,
1424 					count, STREAM_REASSEMBLY, GFP_ATOMIC);
1425 		if (rem < 0)
1426 			return rem;
1427 
1428 		data += (count - rem);
1429 		count = rem;
1430 	}
1431 
1432 	return rem;
1433 }
1434 EXPORT_SYMBOL(hci_recv_stream_fragment);
1435 
1436 /* ---- Interface to upper protocols ---- */
1437 
1438 /* Register/Unregister protocols.
1439  * hci_task_lock is used to ensure that no tasks are running. */
1440 int hci_register_proto(struct hci_proto *hp)
1441 {
1442 	int err = 0;
1443 
1444 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1445 
1446 	if (hp->id >= HCI_MAX_PROTO)
1447 		return -EINVAL;
1448 
1449 	write_lock_bh(&hci_task_lock);
1450 
1451 	if (!hci_proto[hp->id])
1452 		hci_proto[hp->id] = hp;
1453 	else
1454 		err = -EEXIST;
1455 
1456 	write_unlock_bh(&hci_task_lock);
1457 
1458 	return err;
1459 }
1460 EXPORT_SYMBOL(hci_register_proto);
1461 
1462 int hci_unregister_proto(struct hci_proto *hp)
1463 {
1464 	int err = 0;
1465 
1466 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1467 
1468 	if (hp->id >= HCI_MAX_PROTO)
1469 		return -EINVAL;
1470 
1471 	write_lock_bh(&hci_task_lock);
1472 
1473 	if (hci_proto[hp->id])
1474 		hci_proto[hp->id] = NULL;
1475 	else
1476 		err = -ENOENT;
1477 
1478 	write_unlock_bh(&hci_task_lock);
1479 
1480 	return err;
1481 }
1482 EXPORT_SYMBOL(hci_unregister_proto);
1483 
1484 int hci_register_cb(struct hci_cb *cb)
1485 {
1486 	BT_DBG("%p name %s", cb, cb->name);
1487 
1488 	write_lock_bh(&hci_cb_list_lock);
1489 	list_add(&cb->list, &hci_cb_list);
1490 	write_unlock_bh(&hci_cb_list_lock);
1491 
1492 	return 0;
1493 }
1494 EXPORT_SYMBOL(hci_register_cb);
1495 
1496 int hci_unregister_cb(struct hci_cb *cb)
1497 {
1498 	BT_DBG("%p name %s", cb, cb->name);
1499 
1500 	write_lock_bh(&hci_cb_list_lock);
1501 	list_del(&cb->list);
1502 	write_unlock_bh(&hci_cb_list_lock);
1503 
1504 	return 0;
1505 }
1506 EXPORT_SYMBOL(hci_unregister_cb);
1507 
1508 static int hci_send_frame(struct sk_buff *skb)
1509 {
1510 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1511 
1512 	if (!hdev) {
1513 		kfree_skb(skb);
1514 		return -ENODEV;
1515 	}
1516 
1517 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1518 
1519 	if (atomic_read(&hdev->promisc)) {
1520 		/* Time stamp */
1521 		__net_timestamp(skb);
1522 
1523 		hci_send_to_sock(hdev, skb, NULL);
1524 	}
1525 
1526 	/* Get rid of skb owner prior to sending to the driver. */
1527 	skb_orphan(skb);
1528 
1529 	return hdev->send(skb);
1530 }
1531 
1532 /* Send HCI command */
1533 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1534 {
1535 	int len = HCI_COMMAND_HDR_SIZE + plen;
1536 	struct hci_command_hdr *hdr;
1537 	struct sk_buff *skb;
1538 
1539 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1540 
1541 	skb = bt_skb_alloc(len, GFP_ATOMIC);
1542 	if (!skb) {
1543 		BT_ERR("%s no memory for command", hdev->name);
1544 		return -ENOMEM;
1545 	}
1546 
1547 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1548 	hdr->opcode = cpu_to_le16(opcode);
1549 	hdr->plen   = plen;
1550 
1551 	if (plen)
1552 		memcpy(skb_put(skb, plen), param, plen);
1553 
1554 	BT_DBG("skb len %d", skb->len);
1555 
1556 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1557 	skb->dev = (void *) hdev;
1558 
1559 	if (test_bit(HCI_INIT, &hdev->flags))
1560 		hdev->init_last_cmd = opcode;
1561 
1562 	skb_queue_tail(&hdev->cmd_q, skb);
1563 	tasklet_schedule(&hdev->cmd_task);
1564 
1565 	return 0;
1566 }
1567 
1568 /* Get data from the previously sent command */
1569 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1570 {
1571 	struct hci_command_hdr *hdr;
1572 
1573 	if (!hdev->sent_cmd)
1574 		return NULL;
1575 
1576 	hdr = (void *) hdev->sent_cmd->data;
1577 
1578 	if (hdr->opcode != cpu_to_le16(opcode))
1579 		return NULL;
1580 
1581 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1582 
1583 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1584 }
1585 
1586 /* Send ACL data */
1587 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1588 {
1589 	struct hci_acl_hdr *hdr;
1590 	int len = skb->len;
1591 
1592 	skb_push(skb, HCI_ACL_HDR_SIZE);
1593 	skb_reset_transport_header(skb);
1594 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1595 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1596 	hdr->dlen   = cpu_to_le16(len);
1597 }
1598 
1599 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1600 {
1601 	struct hci_dev *hdev = conn->hdev;
1602 	struct sk_buff *list;
1603 
1604 	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1605 
1606 	skb->dev = (void *) hdev;
1607 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1608 	hci_add_acl_hdr(skb, conn->handle, flags);
1609 
1610 	list = skb_shinfo(skb)->frag_list;
1611 	if (!list) {
1612 		/* Non-fragmented */
1613 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1614 
1615 		skb_queue_tail(&conn->data_q, skb);
1616 	} else {
1617 		/* Fragmented */
1618 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1619 
1620 		skb_shinfo(skb)->frag_list = NULL;
1621 
1622 		/* Queue all fragments atomically */
1623 		spin_lock_bh(&conn->data_q.lock);
1624 
1625 		__skb_queue_tail(&conn->data_q, skb);
1626 
1627 		flags &= ~ACL_START;
1628 		flags |= ACL_CONT;
1629 		do {
1630 			skb = list; list = list->next;
1631 
1632 			skb->dev = (void *) hdev;
1633 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1634 			hci_add_acl_hdr(skb, conn->handle, flags);
1635 
1636 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1637 
1638 			__skb_queue_tail(&conn->data_q, skb);
1639 		} while (list);
1640 
1641 		spin_unlock_bh(&conn->data_q.lock);
1642 	}
1643 
1644 	tasklet_schedule(&hdev->tx_task);
1645 }
1646 EXPORT_SYMBOL(hci_send_acl);
1647 
1648 /* Send SCO data */
1649 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1650 {
1651 	struct hci_dev *hdev = conn->hdev;
1652 	struct hci_sco_hdr hdr;
1653 
1654 	BT_DBG("%s len %d", hdev->name, skb->len);
1655 
1656 	hdr.handle = cpu_to_le16(conn->handle);
1657 	hdr.dlen   = skb->len;
1658 
1659 	skb_push(skb, HCI_SCO_HDR_SIZE);
1660 	skb_reset_transport_header(skb);
1661 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1662 
1663 	skb->dev = (void *) hdev;
1664 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1665 
1666 	skb_queue_tail(&conn->data_q, skb);
1667 	tasklet_schedule(&hdev->tx_task);
1668 }
1669 EXPORT_SYMBOL(hci_send_sco);
1670 
1671 /* ---- HCI TX task (outgoing data) ---- */
1672 
1673 /* HCI connection scheduler: pick the least-busy connection of the given type and compute its TX quota */
1674 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1675 {
1676 	struct hci_conn_hash *h = &hdev->conn_hash;
1677 	struct hci_conn *conn = NULL;
1678 	int num = 0, min = ~0;
1679 	struct list_head *p;
1680 
1681 	/* We don't have to lock the device here. Connections are always
1682 	 * added and removed with the TX task disabled. */
1683 	list_for_each(p, &h->list) {
1684 		struct hci_conn *c;
1685 		c = list_entry(p, struct hci_conn, list);
1686 
1687 		if (c->type != type || skb_queue_empty(&c->data_q))
1688 			continue;
1689 
1690 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1691 			continue;
1692 
1693 		num++;
1694 
1695 		if (c->sent < min) {
1696 			min  = c->sent;
1697 			conn = c;
1698 		}
1699 	}
1700 
1701 	if (conn) {
1702 		int cnt, q;
1703 
1704 		switch (conn->type) {
1705 		case ACL_LINK:
1706 			cnt = hdev->acl_cnt;
1707 			break;
1708 		case SCO_LINK:
1709 		case ESCO_LINK:
1710 			cnt = hdev->sco_cnt;
1711 			break;
1712 		case LE_LINK:
1713 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1714 			break;
1715 		default:
1716 			cnt = 0;
1717 			BT_ERR("Unknown link type");
1718 		}
1719 
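		/* Share the available controller buffers evenly across busy connections, but always grant at least one */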
1720 		q = cnt / num;
1721 		*quote = q ? q : 1;
1722 	} else
1723 		*quote = 0;
1724 
1725 	BT_DBG("conn %p quote %d", conn, *quote);
1726 	return conn;
1727 }
1728 
1729 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1730 {
1731 	struct hci_conn_hash *h = &hdev->conn_hash;
1732 	struct list_head *p;
1733 	struct hci_conn  *c;
1734 
1735 	BT_ERR("%s link tx timeout", hdev->name);
1736 
1737 	/* Kill stalled connections */
1738 	list_for_each(p, &h->list) {
1739 		c = list_entry(p, struct hci_conn, list);
1740 		if (c->type == type && c->sent) {
1741 			BT_ERR("%s killing stalled connection %s",
1742 				hdev->name, batostr(&c->dst));
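			/* Disconnect with reason 0x13: Remote User Terminated Connection */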
1743 			hci_acl_disconn(c, 0x13);
1744 		}
1745 	}
1746 }
1747 
1748 static inline void hci_sched_acl(struct hci_dev *hdev)
1749 {
1750 	struct hci_conn *conn;
1751 	struct sk_buff *skb;
1752 	int quote;
1753 
1754 	BT_DBG("%s", hdev->name);
1755 
1756 	if (!test_bit(HCI_RAW, &hdev->flags)) {
1757 		/* ACL tx timeout must be longer than maximum
1758 		 * link supervision timeout (40.9 seconds) */
1759 		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1760 			hci_link_tx_to(hdev, ACL_LINK);
1761 	}
1762 
1763 	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1764 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1765 			BT_DBG("skb %p len %d", skb, skb->len);
1766 
1767 			hci_conn_enter_active_mode(conn);
1768 
1769 			hci_send_frame(skb);
1770 			hdev->acl_last_tx = jiffies;
1771 
1772 			hdev->acl_cnt--;
1773 			conn->sent++;
1774 		}
1775 	}
1776 }
1777 
1778 /* Schedule SCO */
1779 static inline void hci_sched_sco(struct hci_dev *hdev)
1780 {
1781 	struct hci_conn *conn;
1782 	struct sk_buff *skb;
1783 	int quote;
1784 
1785 	BT_DBG("%s", hdev->name);
1786 
1787 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1788 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1789 			BT_DBG("skb %p len %d", skb, skb->len);
1790 			hci_send_frame(skb);
1791 
1792 			conn->sent++;
1793 			if (conn->sent == ~0)
1794 				conn->sent = 0;
1795 		}
1796 	}
1797 }
1798 
1799 static inline void hci_sched_esco(struct hci_dev *hdev)
1800 {
1801 	struct hci_conn *conn;
1802 	struct sk_buff *skb;
1803 	int quote;
1804 
1805 	BT_DBG("%s", hdev->name);
1806 
1807 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1808 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1809 			BT_DBG("skb %p len %d", skb, skb->len);
1810 			hci_send_frame(skb);
1811 
1812 			conn->sent++;
1813 			if (conn->sent == ~0)
1814 				conn->sent = 0;
1815 		}
1816 	}
1817 }
1818 
1819 static inline void hci_sched_le(struct hci_dev *hdev)
1820 {
1821 	struct hci_conn *conn;
1822 	struct sk_buff *skb;
1823 	int quote, cnt;
1824 
1825 	BT_DBG("%s", hdev->name);
1826 
1827 	if (!test_bit(HCI_RAW, &hdev->flags)) {
1828 		/* LE tx timeout must be longer than maximum
1829 		 * link supervision timeout (40.9 seconds) */
1830 		if (!hdev->le_cnt && hdev->le_pkts &&
1831 				time_after(jiffies, hdev->le_last_tx + HZ * 45))
1832 			hci_link_tx_to(hdev, LE_LINK);
1833 	}
1834 
1835 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1836 	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1837 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1838 			BT_DBG("skb %p len %d", skb, skb->len);
1839 
1840 			hci_send_frame(skb);
1841 			hdev->le_last_tx = jiffies;
1842 
1843 			cnt--;
1844 			conn->sent++;
1845 		}
1846 	}
1847 	if (hdev->le_pkts)
1848 		hdev->le_cnt = cnt;
1849 	else
1850 		hdev->acl_cnt = cnt;
1851 }
1852 
1853 static void hci_tx_task(unsigned long arg)
1854 {
1855 	struct hci_dev *hdev = (struct hci_dev *) arg;
1856 	struct sk_buff *skb;
1857 
1858 	read_lock(&hci_task_lock);
1859 
1860 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1861 		hdev->sco_cnt, hdev->le_cnt);
1862 
1863 	/* Schedule queues and send stuff to HCI driver */
1864 
1865 	hci_sched_acl(hdev);
1866 
1867 	hci_sched_sco(hdev);
1868 
1869 	hci_sched_esco(hdev);
1870 
1871 	hci_sched_le(hdev);
1872 
1873 	/* Send next queued raw (unknown type) packet */
1874 	while ((skb = skb_dequeue(&hdev->raw_q)))
1875 		hci_send_frame(skb);
1876 
1877 	read_unlock(&hci_task_lock);
1878 }
1879 
1880 /* ----- HCI RX task (incoming data processing) ----- */
1881 
1882 /* ACL data packet */
1883 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1884 {
1885 	struct hci_acl_hdr *hdr = (void *) skb->data;
1886 	struct hci_conn *conn;
1887 	__u16 handle, flags;
1888 
1889 	skb_pull(skb, HCI_ACL_HDR_SIZE);
1890 
1891 	handle = __le16_to_cpu(hdr->handle);
1892 	flags  = hci_flags(handle);
1893 	handle = hci_handle(handle);
1894 
1895 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1896 
1897 	hdev->stat.acl_rx++;
1898 
1899 	hci_dev_lock(hdev);
1900 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1901 	hci_dev_unlock(hdev);
1902 
1903 	if (conn) {
1904 		register struct hci_proto *hp;
1905 
1906 		hci_conn_enter_active_mode(conn);
1907 
1908 		/* Send to upper protocol */
1909 		hp = hci_proto[HCI_PROTO_L2CAP];
1910 		if (hp && hp->recv_acldata) {
1911 			hp->recv_acldata(conn, skb, flags);
1912 			return;
1913 		}
1914 	} else {
1915 		BT_ERR("%s ACL packet for unknown connection handle %d",
1916 			hdev->name, handle);
1917 	}
1918 
1919 	kfree_skb(skb);
1920 }
1921 
1922 /* SCO data packet */
1923 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1924 {
1925 	struct hci_sco_hdr *hdr = (void *) skb->data;
1926 	struct hci_conn *conn;
1927 	__u16 handle;
1928 
1929 	skb_pull(skb, HCI_SCO_HDR_SIZE);
1930 
1931 	handle = __le16_to_cpu(hdr->handle);
1932 
1933 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1934 
1935 	hdev->stat.sco_rx++;
1936 
1937 	hci_dev_lock(hdev);
1938 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1939 	hci_dev_unlock(hdev);
1940 
1941 	if (conn) {
1942 		register struct hci_proto *hp;
1943 
1944 		/* Send to upper protocol */
1945 		hp = hci_proto[HCI_PROTO_SCO];
1946 		if (hp && hp->recv_scodata) {
1947 			hp->recv_scodata(conn, skb);
1948 			return;
1949 		}
1950 	} else {
1951 		BT_ERR("%s SCO packet for unknown connection handle %d",
1952 			hdev->name, handle);
1953 	}
1954 
1955 	kfree_skb(skb);
1956 }
1957 
1958 static void hci_rx_task(unsigned long arg)
1959 {
1960 	struct hci_dev *hdev = (struct hci_dev *) arg;
1961 	struct sk_buff *skb;
1962 
1963 	BT_DBG("%s", hdev->name);
1964 
1965 	read_lock(&hci_task_lock);
1966 
1967 	while ((skb = skb_dequeue(&hdev->rx_q))) {
1968 		if (atomic_read(&hdev->promisc)) {
1969 			/* Send copy to the sockets */
1970 			hci_send_to_sock(hdev, skb, NULL);
1971 		}
1972 
1973 		if (test_bit(HCI_RAW, &hdev->flags)) {
1974 			kfree_skb(skb);
1975 			continue;
1976 		}
1977 
1978 		if (test_bit(HCI_INIT, &hdev->flags)) {
1979 			/* Don't process data packets in this state. */
1980 			switch (bt_cb(skb)->pkt_type) {
1981 			case HCI_ACLDATA_PKT:
1982 			case HCI_SCODATA_PKT:
1983 				kfree_skb(skb);
1984 				continue;
1985 			}
1986 		}
1987 
1988 		/* Process frame */
1989 		switch (bt_cb(skb)->pkt_type) {
1990 		case HCI_EVENT_PKT:
1991 			hci_event_packet(hdev, skb);
1992 			break;
1993 
1994 		case HCI_ACLDATA_PKT:
1995 			BT_DBG("%s ACL data packet", hdev->name);
1996 			hci_acldata_packet(hdev, skb);
1997 			break;
1998 
1999 		case HCI_SCODATA_PKT:
2000 			BT_DBG("%s SCO data packet", hdev->name);
2001 			hci_scodata_packet(hdev, skb);
2002 			break;
2003 
2004 		default:
2005 			kfree_skb(skb);
2006 			break;
2007 		}
2008 	}
2009 
2010 	read_unlock(&hci_task_lock);
2011 }
2012 
2013 static void hci_cmd_task(unsigned long arg)
2014 {
2015 	struct hci_dev *hdev = (struct hci_dev *) arg;
2016 	struct sk_buff *skb;
2017 
2018 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2019 
2020 	/* Send queued commands */
2021 	if (atomic_read(&hdev->cmd_cnt)) {
2022 		skb = skb_dequeue(&hdev->cmd_q);
2023 		if (!skb)
2024 			return;
2025 
2026 		kfree_skb(hdev->sent_cmd);
2027 
2028 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2029 		if (hdev->sent_cmd) {
2030 			atomic_dec(&hdev->cmd_cnt);
2031 			hci_send_frame(skb);
2032 			mod_timer(&hdev->cmd_timer,
2033 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2034 		} else {
2035 			skb_queue_head(&hdev->cmd_q, skb);
2036 			tasklet_schedule(&hdev->cmd_task);
2037 		}
2038 	}
2039 }
2040