xref: /openbmc/linux/net/bluetooth/hci_core.c (revision fd589a8f)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI core. */
26 
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30 
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
42 #include <linux/rfkill.h>
43 #include <net/sock.h>
44 
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48 
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 
52 static void hci_cmd_task(unsigned long arg);
53 static void hci_rx_task(unsigned long arg);
54 static void hci_tx_task(unsigned long arg);
55 static void hci_notify(struct hci_dev *hdev, int event);
56 
57 static DEFINE_RWLOCK(hci_task_lock);
58 
59 /* HCI device list */
60 LIST_HEAD(hci_dev_list);
61 DEFINE_RWLOCK(hci_dev_list_lock);
62 
63 /* HCI callback list */
64 LIST_HEAD(hci_cb_list);
65 DEFINE_RWLOCK(hci_cb_list_lock);
66 
67 /* HCI protocols */
68 #define HCI_MAX_PROTO	2
69 struct hci_proto *hci_proto[HCI_MAX_PROTO];
70 
71 /* HCI notifiers list */
72 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
73 
74 /* ---- HCI notifications ---- */
75 
76 int hci_register_notifier(struct notifier_block *nb)
77 {
78 	return atomic_notifier_chain_register(&hci_notifier, nb);
79 }
80 
81 int hci_unregister_notifier(struct notifier_block *nb)
82 {
83 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
84 }
85 
86 static void hci_notify(struct hci_dev *hdev, int event)
87 {
88 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
89 }
90 
91 /* ---- HCI requests ---- */
92 
93 void hci_req_complete(struct hci_dev *hdev, int result)
94 {
95 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
96 
97 	if (hdev->req_status == HCI_REQ_PEND) {
98 		hdev->req_result = result;
99 		hdev->req_status = HCI_REQ_DONE;
100 		wake_up_interruptible(&hdev->req_wait_q);
101 	}
102 }
103 
104 static void hci_req_cancel(struct hci_dev *hdev, int err)
105 {
106 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
107 
108 	if (hdev->req_status == HCI_REQ_PEND) {
109 		hdev->req_result = err;
110 		hdev->req_status = HCI_REQ_CANCELED;
111 		wake_up_interruptible(&hdev->req_wait_q);
112 	}
113 }
114 
115 /* Execute request and wait for completion. */
116 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
117 				unsigned long opt, __u32 timeout)
118 {
119 	DECLARE_WAITQUEUE(wait, current);
120 	int err = 0;
121 
122 	BT_DBG("%s start", hdev->name);
123 
124 	hdev->req_status = HCI_REQ_PEND;
125 
126 	add_wait_queue(&hdev->req_wait_q, &wait);
127 	set_current_state(TASK_INTERRUPTIBLE);
128 
129 	req(hdev, opt);
130 	schedule_timeout(timeout);
131 
132 	remove_wait_queue(&hdev->req_wait_q, &wait);
133 
134 	if (signal_pending(current))
135 		return -EINTR;
136 
137 	switch (hdev->req_status) {
138 	case HCI_REQ_DONE:
139 		err = -bt_err(hdev->req_result);
140 		break;
141 
142 	case HCI_REQ_CANCELED:
143 		err = -hdev->req_result;
144 		break;
145 
146 	default:
147 		err = -ETIMEDOUT;
148 		break;
149 	}
150 
151 	hdev->req_status = hdev->req_result = 0;
152 
153 	BT_DBG("%s end: err %d", hdev->name, err);
154 
155 	return err;
156 }
157 
158 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
159 				unsigned long opt, __u32 timeout)
160 {
161 	int ret;
162 
163 	if (!test_bit(HCI_UP, &hdev->flags))
164 		return -ENETDOWN;
165 
166 	/* Serialize all requests */
167 	hci_req_lock(hdev);
168 	ret = __hci_request(hdev, req, opt, timeout);
169 	hci_req_unlock(hdev);
170 
171 	return ret;
172 }
173 
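/*
 * Typical request usage (a minimal sketch based on the callers in this
 * file, e.g. hci_dev_cmd()): the caller supplies a callback that queues
 * one or more HCI commands, and hci_request() blocks until
 * hci_req_complete() is called from the event path or the timeout expires:
 *
 *	err = hci_request(hdev, hci_auth_req, enable,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * __hci_request() is the unlocked variant used while HCI_INIT is set,
 * e.g. during hci_dev_open().
 */
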
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175 {
176 	BT_DBG("%s %ld", hdev->name, opt);
177 
178 	/* Reset device */
179 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
180 }
181 
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
183 {
184 	struct sk_buff *skb;
185 	__le16 param;
186 	__u8 flt_type;
187 
188 	BT_DBG("%s %ld", hdev->name, opt);
189 
190 	/* Driver initialization */
191 
192 	/* Special commands */
193 	while ((skb = skb_dequeue(&hdev->driver_init))) {
194 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 		skb->dev = (void *) hdev;
196 		skb_queue_tail(&hdev->cmd_q, skb);
197 		hci_sched_cmd(hdev);
198 	}
199 	skb_queue_purge(&hdev->driver_init);
200 
201 	/* Mandatory initialization */
202 
203 	/* Reset */
204 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
205 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 
207 	/* Read Local Supported Features */
208 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 
210 	/* Read Local Version */
211 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 
213 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 
216 #if 0
217 	/* Host buffer size */
218 	{
219 		struct hci_cp_host_buffer_size cp;
220 		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
221 		cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 		cp.acl_max_pkt = cpu_to_le16(0xffff);
223 		cp.sco_max_pkt = cpu_to_le16(0xffff);
224 		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
225 	}
226 #endif
227 
228 	/* Read BD Address */
229 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230 
231 	/* Read Class of Device */
232 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233 
234 	/* Read Local Name */
235 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
236 
237 	/* Read Voice Setting */
238 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
239 
240 	/* Optional initialization */
241 
242 	/* Clear Event Filters */
243 	flt_type = HCI_FLT_CLEAR_ALL;
244 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
245 
246 	/* Page timeout ~20 secs */
247 	param = cpu_to_le16(0x8000);
248 	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
249 
250 	/* Connection accept timeout ~20 secs */
251 	param = cpu_to_le16(0x7d00);
252 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
253 }
254 
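/*
 * The driver_init queue drained at the top of hci_init_req() lets a
 * transport driver pre-load vendor-specific setup commands that must go
 * out before the standard initialization.  A minimal sketch, with a
 * hypothetical vendor opcode (not defined in this file):
 *
 *	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + plen, GFP_KERNEL);
 *	... build the vendor command (e.g. opcode 0xfc00) in skb ...
 *	skb_queue_tail(&hdev->driver_init, skb);
 *	hci_register_dev(hdev);
 */
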
255 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
256 {
257 	__u8 scan = opt;
258 
259 	BT_DBG("%s %x", hdev->name, scan);
260 
261 	/* Inquiry and Page scans */
262 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
263 }
264 
265 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
266 {
267 	__u8 auth = opt;
268 
269 	BT_DBG("%s %x", hdev->name, auth);
270 
271 	/* Authentication */
272 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
273 }
274 
275 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
276 {
277 	__u8 encrypt = opt;
278 
279 	BT_DBG("%s %x", hdev->name, encrypt);
280 
281 	/* Encryption */
282 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
283 }
284 
285 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
286 {
287 	__le16 policy = cpu_to_le16(opt);
288 
289 	BT_DBG("%s %x", hdev->name, policy);
290 
291 	/* Default link policy */
292 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
293 }
294 
295 /* Get HCI device by index.
296  * Device is held on return. */
297 struct hci_dev *hci_dev_get(int index)
298 {
299 	struct hci_dev *hdev = NULL;
300 	struct list_head *p;
301 
302 	BT_DBG("%d", index);
303 
304 	if (index < 0)
305 		return NULL;
306 
307 	read_lock(&hci_dev_list_lock);
308 	list_for_each(p, &hci_dev_list) {
309 		struct hci_dev *d = list_entry(p, struct hci_dev, list);
310 		if (d->id == index) {
311 			hdev = hci_dev_hold(d);
312 			break;
313 		}
314 	}
315 	read_unlock(&hci_dev_list_lock);
316 	return hdev;
317 }
318 
319 /* ---- Inquiry support ---- */
320 static void inquiry_cache_flush(struct hci_dev *hdev)
321 {
322 	struct inquiry_cache *cache = &hdev->inq_cache;
323 	struct inquiry_entry *next  = cache->list, *e;
324 
325 	BT_DBG("cache %p", cache);
326 
327 	cache->list = NULL;
328 	while ((e = next)) {
329 		next = e->next;
330 		kfree(e);
331 	}
332 }
333 
334 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
335 {
336 	struct inquiry_cache *cache = &hdev->inq_cache;
337 	struct inquiry_entry *e;
338 
339 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
340 
341 	for (e = cache->list; e; e = e->next)
342 		if (!bacmp(&e->data.bdaddr, bdaddr))
343 			break;
344 	return e;
345 }
346 
347 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
348 {
349 	struct inquiry_cache *cache = &hdev->inq_cache;
350 	struct inquiry_entry *e;
351 
352 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
353 
354 	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
355 		/* Entry not in the cache. Add new one. */
356 		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
357 			return;
358 		e->next     = cache->list;
359 		cache->list = e;
360 	}
361 
362 	memcpy(&e->data, data, sizeof(*data));
363 	e->timestamp = jiffies;
364 	cache->timestamp = jiffies;
365 }
366 
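/*
 * The inquiry cache is a simple singly linked list: new entries are
 * prepended by hci_inquiry_cache_update(), existing entries are updated
 * in place, and each update refreshes both the per-entry and the
 * per-cache timestamp.  hci_inquiry() below uses the cache timestamp
 * (via inquiry_cache_age()) to decide whether a fresh inquiry is needed.
 */
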
367 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
368 {
369 	struct inquiry_cache *cache = &hdev->inq_cache;
370 	struct inquiry_info *info = (struct inquiry_info *) buf;
371 	struct inquiry_entry *e;
372 	int copied = 0;
373 
374 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
375 		struct inquiry_data *data = &e->data;
376 		bacpy(&info->bdaddr, &data->bdaddr);
377 		info->pscan_rep_mode	= data->pscan_rep_mode;
378 		info->pscan_period_mode	= data->pscan_period_mode;
379 		info->pscan_mode	= data->pscan_mode;
380 		memcpy(info->dev_class, data->dev_class, 3);
381 		info->clock_offset	= data->clock_offset;
382 		info++;
383 	}
384 
385 	BT_DBG("cache %p, copied %d", cache, copied);
386 	return copied;
387 }
388 
389 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
390 {
391 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
392 	struct hci_cp_inquiry cp;
393 
394 	BT_DBG("%s", hdev->name);
395 
396 	if (test_bit(HCI_INQUIRY, &hdev->flags))
397 		return;
398 
399 	/* Start Inquiry */
400 	memcpy(&cp.lap, &ir->lap, 3);
401 	cp.length  = ir->length;
402 	cp.num_rsp = ir->num_rsp;
403 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
404 }
405 
406 int hci_inquiry(void __user *arg)
407 {
408 	__u8 __user *ptr = arg;
409 	struct hci_inquiry_req ir;
410 	struct hci_dev *hdev;
411 	int err = 0, do_inquiry = 0, max_rsp;
412 	long timeo;
413 	__u8 *buf;
414 
415 	if (copy_from_user(&ir, ptr, sizeof(ir)))
416 		return -EFAULT;
417 
418 	if (!(hdev = hci_dev_get(ir.dev_id)))
419 		return -ENODEV;
420 
421 	hci_dev_lock_bh(hdev);
422 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
423 					inquiry_cache_empty(hdev) ||
424 					ir.flags & IREQ_CACHE_FLUSH) {
425 		inquiry_cache_flush(hdev);
426 		do_inquiry = 1;
427 	}
428 	hci_dev_unlock_bh(hdev);
429 
430 	timeo = ir.length * msecs_to_jiffies(2000);
431 	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
432 		goto done;
433 
434 	/* For an unlimited number of responses, use a buffer with 255 entries */
435 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
436 
437 	/* cache_dump can't sleep, so allocate a temporary buffer and copy
438 	 * it to user space afterwards.
439 	 */
440 	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
441 		err = -ENOMEM;
442 		goto done;
443 	}
444 
445 	hci_dev_lock_bh(hdev);
446 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
447 	hci_dev_unlock_bh(hdev);
448 
449 	BT_DBG("num_rsp %d", ir.num_rsp);
450 
451 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
452 		ptr += sizeof(ir);
453 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
454 					ir.num_rsp))
455 			err = -EFAULT;
456 	} else
457 		err = -EFAULT;
458 
459 	kfree(buf);
460 
461 done:
462 	hci_dev_put(hdev);
463 	return err;
464 }
465 
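/*
 * From user space this handler is reached through the HCIINQUIRY ioctl
 * on a raw HCI socket (dispatched in hci_sock.c).  A rough sketch of a
 * caller, under that assumption; the LAP bytes are the general inquiry
 * access code 0x9e8b33:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(sock, HCIINQUIRY, &buf);
 *	// on return, buf.ir.num_rsp holds the number of info[] entries filled
 */
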
466 /* ---- HCI ioctl helpers ---- */
467 
468 int hci_dev_open(__u16 dev)
469 {
470 	struct hci_dev *hdev;
471 	int ret = 0;
472 
473 	if (!(hdev = hci_dev_get(dev)))
474 		return -ENODEV;
475 
476 	BT_DBG("%s %p", hdev->name, hdev);
477 
478 	hci_req_lock(hdev);
479 
480 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
481 		ret = -ERFKILL;
482 		goto done;
483 	}
484 
485 	if (test_bit(HCI_UP, &hdev->flags)) {
486 		ret = -EALREADY;
487 		goto done;
488 	}
489 
490 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
491 		set_bit(HCI_RAW, &hdev->flags);
492 
493 	if (hdev->open(hdev)) {
494 		ret = -EIO;
495 		goto done;
496 	}
497 
498 	if (!test_bit(HCI_RAW, &hdev->flags)) {
499 		atomic_set(&hdev->cmd_cnt, 1);
500 		set_bit(HCI_INIT, &hdev->flags);
501 
502 		//__hci_request(hdev, hci_reset_req, 0, HZ);
503 		ret = __hci_request(hdev, hci_init_req, 0,
504 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
505 
506 		clear_bit(HCI_INIT, &hdev->flags);
507 	}
508 
509 	if (!ret) {
510 		hci_dev_hold(hdev);
511 		set_bit(HCI_UP, &hdev->flags);
512 		hci_notify(hdev, HCI_DEV_UP);
513 	} else {
514 		/* Init failed, cleanup */
515 		tasklet_kill(&hdev->rx_task);
516 		tasklet_kill(&hdev->tx_task);
517 		tasklet_kill(&hdev->cmd_task);
518 
519 		skb_queue_purge(&hdev->cmd_q);
520 		skb_queue_purge(&hdev->rx_q);
521 
522 		if (hdev->flush)
523 			hdev->flush(hdev);
524 
525 		if (hdev->sent_cmd) {
526 			kfree_skb(hdev->sent_cmd);
527 			hdev->sent_cmd = NULL;
528 		}
529 
530 		hdev->close(hdev);
531 		hdev->flags = 0;
532 	}
533 
534 done:
535 	hci_req_unlock(hdev);
536 	hci_dev_put(hdev);
537 	return ret;
538 }
539 
540 static int hci_dev_do_close(struct hci_dev *hdev)
541 {
542 	BT_DBG("%s %p", hdev->name, hdev);
543 
544 	hci_req_cancel(hdev, ENODEV);
545 	hci_req_lock(hdev);
546 
547 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
548 		hci_req_unlock(hdev);
549 		return 0;
550 	}
551 
552 	/* Kill RX and TX tasks */
553 	tasklet_kill(&hdev->rx_task);
554 	tasklet_kill(&hdev->tx_task);
555 
556 	hci_dev_lock_bh(hdev);
557 	inquiry_cache_flush(hdev);
558 	hci_conn_hash_flush(hdev);
559 	hci_dev_unlock_bh(hdev);
560 
561 	hci_notify(hdev, HCI_DEV_DOWN);
562 
563 	if (hdev->flush)
564 		hdev->flush(hdev);
565 
566 	/* Reset device */
567 	skb_queue_purge(&hdev->cmd_q);
568 	atomic_set(&hdev->cmd_cnt, 1);
569 	if (!test_bit(HCI_RAW, &hdev->flags)) {
570 		set_bit(HCI_INIT, &hdev->flags);
571 		__hci_request(hdev, hci_reset_req, 0,
572 					msecs_to_jiffies(250));
573 		clear_bit(HCI_INIT, &hdev->flags);
574 	}
575 
576 	/* Kill cmd task */
577 	tasklet_kill(&hdev->cmd_task);
578 
579 	/* Drop queues */
580 	skb_queue_purge(&hdev->rx_q);
581 	skb_queue_purge(&hdev->cmd_q);
582 	skb_queue_purge(&hdev->raw_q);
583 
584 	/* Drop last sent command */
585 	if (hdev->sent_cmd) {
586 		kfree_skb(hdev->sent_cmd);
587 		hdev->sent_cmd = NULL;
588 	}
589 
590 	/* After this point our queues are empty
591 	 * and no tasks are scheduled. */
592 	hdev->close(hdev);
593 
594 	/* Clear flags */
595 	hdev->flags = 0;
596 
597 	hci_req_unlock(hdev);
598 
599 	hci_dev_put(hdev);
600 	return 0;
601 }
602 
603 int hci_dev_close(__u16 dev)
604 {
605 	struct hci_dev *hdev;
606 	int err;
607 
608 	if (!(hdev = hci_dev_get(dev)))
609 		return -ENODEV;
610 	err = hci_dev_do_close(hdev);
611 	hci_dev_put(hdev);
612 	return err;
613 }
614 
615 int hci_dev_reset(__u16 dev)
616 {
617 	struct hci_dev *hdev;
618 	int ret = 0;
619 
620 	if (!(hdev = hci_dev_get(dev)))
621 		return -ENODEV;
622 
623 	hci_req_lock(hdev);
624 	tasklet_disable(&hdev->tx_task);
625 
626 	if (!test_bit(HCI_UP, &hdev->flags))
627 		goto done;
628 
629 	/* Drop queues */
630 	skb_queue_purge(&hdev->rx_q);
631 	skb_queue_purge(&hdev->cmd_q);
632 
633 	hci_dev_lock_bh(hdev);
634 	inquiry_cache_flush(hdev);
635 	hci_conn_hash_flush(hdev);
636 	hci_dev_unlock_bh(hdev);
637 
638 	if (hdev->flush)
639 		hdev->flush(hdev);
640 
641 	atomic_set(&hdev->cmd_cnt, 1);
642 	hdev->acl_cnt = 0; hdev->sco_cnt = 0;
643 
644 	if (!test_bit(HCI_RAW, &hdev->flags))
645 		ret = __hci_request(hdev, hci_reset_req, 0,
646 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
647 
648 done:
649 	tasklet_enable(&hdev->tx_task);
650 	hci_req_unlock(hdev);
651 	hci_dev_put(hdev);
652 	return ret;
653 }
654 
655 int hci_dev_reset_stat(__u16 dev)
656 {
657 	struct hci_dev *hdev;
658 	int ret = 0;
659 
660 	if (!(hdev = hci_dev_get(dev)))
661 		return -ENODEV;
662 
663 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
664 
665 	hci_dev_put(hdev);
666 
667 	return ret;
668 }
669 
670 int hci_dev_cmd(unsigned int cmd, void __user *arg)
671 {
672 	struct hci_dev *hdev;
673 	struct hci_dev_req dr;
674 	int err = 0;
675 
676 	if (copy_from_user(&dr, arg, sizeof(dr)))
677 		return -EFAULT;
678 
679 	if (!(hdev = hci_dev_get(dr.dev_id)))
680 		return -ENODEV;
681 
682 	switch (cmd) {
683 	case HCISETAUTH:
684 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
685 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
686 		break;
687 
688 	case HCISETENCRYPT:
689 		if (!lmp_encrypt_capable(hdev)) {
690 			err = -EOPNOTSUPP;
691 			break;
692 		}
693 
694 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
695 			/* Auth must be enabled first */
696 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
697 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
698 			if (err)
699 				break;
700 		}
701 
702 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
703 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
704 		break;
705 
706 	case HCISETSCAN:
707 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
708 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
709 		break;
710 
711 	case HCISETLINKPOL:
712 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
713 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
714 		break;
715 
716 	case HCISETLINKMODE:
717 		hdev->link_mode = ((__u16) dr.dev_opt) &
718 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
719 		break;
720 
721 	case HCISETPTYPE:
722 		hdev->pkt_type = (__u16) dr.dev_opt;
723 		break;
724 
725 	case HCISETACLMTU:
726 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
727 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
728 		break;
729 
730 	case HCISETSCOMTU:
731 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
732 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
733 		break;
734 
735 	default:
736 		err = -EINVAL;
737 		break;
738 	}
739 
740 	hci_dev_put(hdev);
741 	return err;
742 }
743 
744 int hci_get_dev_list(void __user *arg)
745 {
746 	struct hci_dev_list_req *dl;
747 	struct hci_dev_req *dr;
748 	struct list_head *p;
749 	int n = 0, size, err;
750 	__u16 dev_num;
751 
752 	if (get_user(dev_num, (__u16 __user *) arg))
753 		return -EFAULT;
754 
755 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
756 		return -EINVAL;
757 
758 	size = sizeof(*dl) + dev_num * sizeof(*dr);
759 
760 	if (!(dl = kzalloc(size, GFP_KERNEL)))
761 		return -ENOMEM;
762 
763 	dr = dl->dev_req;
764 
765 	read_lock_bh(&hci_dev_list_lock);
766 	list_for_each(p, &hci_dev_list) {
767 		struct hci_dev *hdev;
768 		hdev = list_entry(p, struct hci_dev, list);
769 		(dr + n)->dev_id  = hdev->id;
770 		(dr + n)->dev_opt = hdev->flags;
771 		if (++n >= dev_num)
772 			break;
773 	}
774 	read_unlock_bh(&hci_dev_list_lock);
775 
776 	dl->dev_num = n;
777 	size = sizeof(*dl) + n * sizeof(*dr);
778 
779 	err = copy_to_user(arg, dl, size);
780 	kfree(dl);
781 
782 	return err ? -EFAULT : 0;
783 }
784 
785 int hci_get_dev_info(void __user *arg)
786 {
787 	struct hci_dev *hdev;
788 	struct hci_dev_info di;
789 	int err = 0;
790 
791 	if (copy_from_user(&di, arg, sizeof(di)))
792 		return -EFAULT;
793 
794 	if (!(hdev = hci_dev_get(di.dev_id)))
795 		return -ENODEV;
796 
797 	strcpy(di.name, hdev->name);
798 	di.bdaddr   = hdev->bdaddr;
799 	di.type     = hdev->type;
800 	di.flags    = hdev->flags;
801 	di.pkt_type = hdev->pkt_type;
802 	di.acl_mtu  = hdev->acl_mtu;
803 	di.acl_pkts = hdev->acl_pkts;
804 	di.sco_mtu  = hdev->sco_mtu;
805 	di.sco_pkts = hdev->sco_pkts;
806 	di.link_policy = hdev->link_policy;
807 	di.link_mode   = hdev->link_mode;
808 
809 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
810 	memcpy(&di.features, &hdev->features, sizeof(di.features));
811 
812 	if (copy_to_user(arg, &di, sizeof(di)))
813 		err = -EFAULT;
814 
815 	hci_dev_put(hdev);
816 
817 	return err;
818 }
819 
820 /* ---- Interface to HCI drivers ---- */
821 
822 static int hci_rfkill_set_block(void *data, bool blocked)
823 {
824 	struct hci_dev *hdev = data;
825 
826 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
827 
828 	if (!blocked)
829 		return 0;
830 
831 	hci_dev_do_close(hdev);
832 
833 	return 0;
834 }
835 
836 static const struct rfkill_ops hci_rfkill_ops = {
837 	.set_block = hci_rfkill_set_block,
838 };
839 
840 /* Alloc HCI device */
841 struct hci_dev *hci_alloc_dev(void)
842 {
843 	struct hci_dev *hdev;
844 
845 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
846 	if (!hdev)
847 		return NULL;
848 
849 	skb_queue_head_init(&hdev->driver_init);
850 
851 	return hdev;
852 }
853 EXPORT_SYMBOL(hci_alloc_dev);
854 
855 /* Free HCI device */
856 void hci_free_dev(struct hci_dev *hdev)
857 {
858 	skb_queue_purge(&hdev->driver_init);
859 
860 	/* will free via device release */
861 	put_device(&hdev->dev);
862 }
863 EXPORT_SYMBOL(hci_free_dev);
864 
865 /* Register HCI device */
866 int hci_register_dev(struct hci_dev *hdev)
867 {
868 	struct list_head *head = &hci_dev_list, *p;
869 	int i, id = 0;
870 
871 	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
872 						hdev->type, hdev->owner);
873 
874 	if (!hdev->open || !hdev->close || !hdev->destruct)
875 		return -EINVAL;
876 
877 	write_lock_bh(&hci_dev_list_lock);
878 
879 	/* Find first available device id */
880 	list_for_each(p, &hci_dev_list) {
881 		if (list_entry(p, struct hci_dev, list)->id != id)
882 			break;
883 		head = p; id++;
884 	}
885 
886 	sprintf(hdev->name, "hci%d", id);
887 	hdev->id = id;
888 	list_add(&hdev->list, head);
889 
890 	atomic_set(&hdev->refcnt, 1);
891 	spin_lock_init(&hdev->lock);
892 
893 	hdev->flags = 0;
894 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
895 	hdev->esco_type = (ESCO_HV1);
896 	hdev->link_mode = (HCI_LM_ACCEPT);
897 
898 	hdev->idle_timeout = 0;
899 	hdev->sniff_max_interval = 800;
900 	hdev->sniff_min_interval = 80;
901 	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
902 	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
903 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
904 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
905 
906 	skb_queue_head_init(&hdev->rx_q);
907 	skb_queue_head_init(&hdev->cmd_q);
908 	skb_queue_head_init(&hdev->raw_q);
909 
910 	for (i = 0; i < 3; i++)
911 		hdev->reassembly[i] = NULL;
912 
913 	init_waitqueue_head(&hdev->req_wait_q);
914 	mutex_init(&hdev->req_lock);
915 
916 	inquiry_cache_init(hdev);
917 
918 	hci_conn_hash_init(hdev);
919 
920 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
921 
922 	atomic_set(&hdev->promisc, 0);
923 
924 	write_unlock_bh(&hci_dev_list_lock);
925 
926 	hci_register_sysfs(hdev);
927 
928 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
929 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
930 	if (hdev->rfkill) {
931 		if (rfkill_register(hdev->rfkill) < 0) {
932 			rfkill_destroy(hdev->rfkill);
933 			hdev->rfkill = NULL;
934 		}
935 	}
936 
937 	hci_notify(hdev, HCI_DEV_REG);
938 
939 	return id;
940 }
941 EXPORT_SYMBOL(hci_register_dev);
942 
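/*
 * A minimal sketch of how a transport driver is expected to use the
 * registration interface above.  The callbacks checked or called in this
 * file are open, close, send and destruct; the my_* names are of course
 * hypothetical, and HCI_VIRTUAL stands in for one of the HCI_* bus type
 * constants from hci.h:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->type     = HCI_VIRTUAL;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */
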
943 /* Unregister HCI device */
944 int hci_unregister_dev(struct hci_dev *hdev)
945 {
946 	int i;
947 
948 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
949 
950 	write_lock_bh(&hci_dev_list_lock);
951 	list_del(&hdev->list);
952 	write_unlock_bh(&hci_dev_list_lock);
953 
954 	hci_dev_do_close(hdev);
955 
956 	for (i = 0; i < 3; i++)
957 		kfree_skb(hdev->reassembly[i]);
958 
959 	hci_notify(hdev, HCI_DEV_UNREG);
960 
961 	if (hdev->rfkill) {
962 		rfkill_unregister(hdev->rfkill);
963 		rfkill_destroy(hdev->rfkill);
964 	}
965 
966 	hci_unregister_sysfs(hdev);
967 
968 	__hci_dev_put(hdev);
969 
970 	return 0;
971 }
972 EXPORT_SYMBOL(hci_unregister_dev);
973 
974 /* Suspend HCI device */
975 int hci_suspend_dev(struct hci_dev *hdev)
976 {
977 	hci_notify(hdev, HCI_DEV_SUSPEND);
978 	return 0;
979 }
980 EXPORT_SYMBOL(hci_suspend_dev);
981 
982 /* Resume HCI device */
983 int hci_resume_dev(struct hci_dev *hdev)
984 {
985 	hci_notify(hdev, HCI_DEV_RESUME);
986 	return 0;
987 }
988 EXPORT_SYMBOL(hci_resume_dev);
989 
990 /* Receive packet type fragment */
991 #define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])
992 
993 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
994 {
995 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
996 		return -EILSEQ;
997 
998 	while (count) {
999 		struct sk_buff *skb = __reassembly(hdev, type);
1000 		struct { int expect; } *scb;
1001 		int len = 0;
1002 
1003 		if (!skb) {
1004 			/* Start of the frame */
1005 
1006 			switch (type) {
1007 			case HCI_EVENT_PKT:
1008 				if (count >= HCI_EVENT_HDR_SIZE) {
1009 					struct hci_event_hdr *h = data;
1010 					len = HCI_EVENT_HDR_SIZE + h->plen;
1011 				} else
1012 					return -EILSEQ;
1013 				break;
1014 
1015 			case HCI_ACLDATA_PKT:
1016 				if (count >= HCI_ACL_HDR_SIZE) {
1017 					struct hci_acl_hdr *h = data;
1018 					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
1019 				} else
1020 					return -EILSEQ;
1021 				break;
1022 
1023 			case HCI_SCODATA_PKT:
1024 				if (count >= HCI_SCO_HDR_SIZE) {
1025 					struct hci_sco_hdr *h = data;
1026 					len = HCI_SCO_HDR_SIZE + h->dlen;
1027 				} else
1028 					return -EILSEQ;
1029 				break;
1030 			}
1031 
1032 			skb = bt_skb_alloc(len, GFP_ATOMIC);
1033 			if (!skb) {
1034 				BT_ERR("%s no memory for packet", hdev->name);
1035 				return -ENOMEM;
1036 			}
1037 
1038 			skb->dev = (void *) hdev;
1039 			bt_cb(skb)->pkt_type = type;
1040 
1041 			__reassembly(hdev, type) = skb;
1042 
1043 			scb = (void *) skb->cb;
1044 			scb->expect = len;
1045 		} else {
1046 			/* Continuation */
1047 
1048 			scb = (void *) skb->cb;
1049 			len = scb->expect;
1050 		}
1051 
1052 		len = min(len, count);
1053 
1054 		memcpy(skb_put(skb, len), data, len);
1055 
1056 		scb->expect -= len;
1057 
1058 		if (scb->expect == 0) {
1059 			/* Complete frame */
1060 
1061 			__reassembly(hdev, type) = NULL;
1062 
1063 			bt_cb(skb)->pkt_type = type;
1064 			hci_recv_frame(skb);
1065 		}
1066 
1067 		count -= len; data += len;
1068 	}
1069 
1070 	return 0;
1071 }
1072 EXPORT_SYMBOL(hci_recv_fragment);
1073 
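/*
 * hci_recv_fragment() is intended for drivers whose transport delivers
 * the HCI stream in arbitrary chunks (e.g. a UART or USB bulk pipe): the
 * driver passes each chunk together with its packet type, and the code
 * above rebuilds complete frames before handing them to hci_recv_frame().
 * A sketch of a driver receive path, with hypothetical names:
 *
 *	static void my_rx(struct hci_dev *hdev, u8 type, void *buf, int len)
 *	{
 *		if (hci_recv_fragment(hdev, type, buf, len) < 0)
 *			BT_ERR("%s corrupted packet", hdev->name);
 *	}
 */
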
1074 /* ---- Interface to upper protocols ---- */
1075 
1076 /* Register/Unregister protocols.
1077  * hci_task_lock is used to ensure that no tasks are running. */
1078 int hci_register_proto(struct hci_proto *hp)
1079 {
1080 	int err = 0;
1081 
1082 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1083 
1084 	if (hp->id >= HCI_MAX_PROTO)
1085 		return -EINVAL;
1086 
1087 	write_lock_bh(&hci_task_lock);
1088 
1089 	if (!hci_proto[hp->id])
1090 		hci_proto[hp->id] = hp;
1091 	else
1092 		err = -EEXIST;
1093 
1094 	write_unlock_bh(&hci_task_lock);
1095 
1096 	return err;
1097 }
1098 EXPORT_SYMBOL(hci_register_proto);
1099 
1100 int hci_unregister_proto(struct hci_proto *hp)
1101 {
1102 	int err = 0;
1103 
1104 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1105 
1106 	if (hp->id >= HCI_MAX_PROTO)
1107 		return -EINVAL;
1108 
1109 	write_lock_bh(&hci_task_lock);
1110 
1111 	if (hci_proto[hp->id])
1112 		hci_proto[hp->id] = NULL;
1113 	else
1114 		err = -ENOENT;
1115 
1116 	write_unlock_bh(&hci_task_lock);
1117 
1118 	return err;
1119 }
1120 EXPORT_SYMBOL(hci_unregister_proto);
1121 
1122 int hci_register_cb(struct hci_cb *cb)
1123 {
1124 	BT_DBG("%p name %s", cb, cb->name);
1125 
1126 	write_lock_bh(&hci_cb_list_lock);
1127 	list_add(&cb->list, &hci_cb_list);
1128 	write_unlock_bh(&hci_cb_list_lock);
1129 
1130 	return 0;
1131 }
1132 EXPORT_SYMBOL(hci_register_cb);
1133 
1134 int hci_unregister_cb(struct hci_cb *cb)
1135 {
1136 	BT_DBG("%p name %s", cb, cb->name);
1137 
1138 	write_lock_bh(&hci_cb_list_lock);
1139 	list_del(&cb->list);
1140 	write_unlock_bh(&hci_cb_list_lock);
1141 
1142 	return 0;
1143 }
1144 EXPORT_SYMBOL(hci_unregister_cb);
1145 
1146 static int hci_send_frame(struct sk_buff *skb)
1147 {
1148 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1149 
1150 	if (!hdev) {
1151 		kfree_skb(skb);
1152 		return -ENODEV;
1153 	}
1154 
1155 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1156 
1157 	if (atomic_read(&hdev->promisc)) {
1158 		/* Time stamp */
1159 		__net_timestamp(skb);
1160 
1161 		hci_send_to_sock(hdev, skb);
1162 	}
1163 
1164 	/* Get rid of the skb owner before sending to the driver. */
1165 	skb_orphan(skb);
1166 
1167 	return hdev->send(skb);
1168 }
1169 
1170 /* Send HCI command */
1171 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1172 {
1173 	int len = HCI_COMMAND_HDR_SIZE + plen;
1174 	struct hci_command_hdr *hdr;
1175 	struct sk_buff *skb;
1176 
1177 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1178 
1179 	skb = bt_skb_alloc(len, GFP_ATOMIC);
1180 	if (!skb) {
1181 		BT_ERR("%s no memory for command", hdev->name);
1182 		return -ENOMEM;
1183 	}
1184 
1185 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1186 	hdr->opcode = cpu_to_le16(opcode);
1187 	hdr->plen   = plen;
1188 
1189 	if (plen)
1190 		memcpy(skb_put(skb, plen), param, plen);
1191 
1192 	BT_DBG("skb len %d", skb->len);
1193 
1194 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1195 	skb->dev = (void *) hdev;
1196 	skb_queue_tail(&hdev->cmd_q, skb);
1197 	hci_sched_cmd(hdev);
1198 
1199 	return 0;
1200 }
1201 
1202 /* Get data from the previously sent command */
1203 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1204 {
1205 	struct hci_command_hdr *hdr;
1206 
1207 	if (!hdev->sent_cmd)
1208 		return NULL;
1209 
1210 	hdr = (void *) hdev->sent_cmd->data;
1211 
1212 	if (hdr->opcode != cpu_to_le16(opcode))
1213 		return NULL;
1214 
1215 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1216 
1217 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1218 }
1219 
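/*
 * Note that hci_send_cmd() only queues the command; hci_cmd_task() below
 * pushes it to the driver once the controller has a free command slot
 * (hdev->cmd_cnt).  Event handlers can later call hci_sent_cmd_data() to
 * recover the parameters of the command a Command Complete event refers
 * to, since hdev->sent_cmd keeps a clone of the last command sent.
 */
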
1220 /* Send ACL data */
1221 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1222 {
1223 	struct hci_acl_hdr *hdr;
1224 	int len = skb->len;
1225 
1226 	skb_push(skb, HCI_ACL_HDR_SIZE);
1227 	skb_reset_transport_header(skb);
1228 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1229 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1230 	hdr->dlen   = cpu_to_le16(len);
1231 }
1232 
1233 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1234 {
1235 	struct hci_dev *hdev = conn->hdev;
1236 	struct sk_buff *list;
1237 
1238 	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1239 
1240 	skb->dev = (void *) hdev;
1241 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1242 	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1243 
1244 	if (!(list = skb_shinfo(skb)->frag_list)) {
1245 		/* Non-fragmented */
1246 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1247 
1248 		skb_queue_tail(&conn->data_q, skb);
1249 	} else {
1250 		/* Fragmented */
1251 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1252 
1253 		skb_shinfo(skb)->frag_list = NULL;
1254 
1255 		/* Queue all fragments atomically */
1256 		spin_lock_bh(&conn->data_q.lock);
1257 
1258 		__skb_queue_tail(&conn->data_q, skb);
1259 		do {
1260 			skb = list; list = list->next;
1261 
1262 			skb->dev = (void *) hdev;
1263 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1264 			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1265 
1266 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1267 
1268 			__skb_queue_tail(&conn->data_q, skb);
1269 		} while (list);
1270 
1271 		spin_unlock_bh(&conn->data_q.lock);
1272 	}
1273 
1274 	hci_sched_tx(hdev);
1275 	return 0;
1276 }
1277 EXPORT_SYMBOL(hci_send_acl);
1278 
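/*
 * Fragmentation in hci_send_acl() relies on the caller (L2CAP) having
 * built the fragments into skb_shinfo(skb)->frag_list: the first buffer
 * is tagged ACL_START, every following fragment ACL_CONT, and the whole
 * chain is queued under the data_q lock so fragments of different
 * packets cannot interleave on the connection's data queue.
 */
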
1279 /* Send SCO data */
1280 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1281 {
1282 	struct hci_dev *hdev = conn->hdev;
1283 	struct hci_sco_hdr hdr;
1284 
1285 	BT_DBG("%s len %d", hdev->name, skb->len);
1286 
1287 	if (skb->len > hdev->sco_mtu) {
1288 		kfree_skb(skb);
1289 		return -EINVAL;
1290 	}
1291 
1292 	hdr.handle = cpu_to_le16(conn->handle);
1293 	hdr.dlen   = skb->len;
1294 
1295 	skb_push(skb, HCI_SCO_HDR_SIZE);
1296 	skb_reset_transport_header(skb);
1297 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1298 
1299 	skb->dev = (void *) hdev;
1300 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1301 	skb_queue_tail(&conn->data_q, skb);
1302 	hci_sched_tx(hdev);
1303 	return 0;
1304 }
1305 EXPORT_SYMBOL(hci_send_sco);
1306 
1307 /* ---- HCI TX task (outgoing data) ---- */
1308 
1309 /* HCI Connection scheduler */
1310 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1311 {
1312 	struct hci_conn_hash *h = &hdev->conn_hash;
1313 	struct hci_conn *conn = NULL;
1314 	int num = 0, min = ~0;
1315 	struct list_head *p;
1316 
1317 	/* We don't have to lock device here. Connections are always
1318 	 * added and removed with TX task disabled. */
1319 	list_for_each(p, &h->list) {
1320 		struct hci_conn *c;
1321 		c = list_entry(p, struct hci_conn, list);
1322 
1323 		if (c->type != type || skb_queue_empty(&c->data_q))
1324 			continue;
1325 
1326 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1327 			continue;
1328 
1329 		num++;
1330 
1331 		if (c->sent < min) {
1332 			min  = c->sent;
1333 			conn = c;
1334 		}
1335 	}
1336 
1337 	if (conn) {
1338 		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1339 		int q = cnt / num;
1340 		*quote = q ? q : 1;
1341 	} else
1342 		*quote = 0;
1343 
1344 	BT_DBG("conn %p quote %d", conn, *quote);
1345 	return conn;
1346 }
1347 
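/*
 * A worked example of the quota computed above: with 8 free ACL buffers
 * (hdev->acl_cnt == 8) and 3 connections that have queued data, the
 * scheduler picks the connection with the fewest packets in flight
 * (smallest c->sent) and allows it 8 / 3 = 2 packets before choosing
 * again; the quote is never less than 1, so no connection is starved.
 */
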
1348 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1349 {
1350 	struct hci_conn_hash *h = &hdev->conn_hash;
1351 	struct list_head *p;
1352 	struct hci_conn  *c;
1353 
1354 	BT_ERR("%s ACL tx timeout", hdev->name);
1355 
1356 	/* Kill stalled connections */
1357 	list_for_each(p, &h->list) {
1358 		c = list_entry(p, struct hci_conn, list);
1359 		if (c->type == ACL_LINK && c->sent) {
1360 			BT_ERR("%s killing stalled ACL connection %s",
1361 				hdev->name, batostr(&c->dst));
1362 			hci_acl_disconn(c, 0x13);
1363 		}
1364 	}
1365 }
1366 
1367 static inline void hci_sched_acl(struct hci_dev *hdev)
1368 {
1369 	struct hci_conn *conn;
1370 	struct sk_buff *skb;
1371 	int quote;
1372 
1373 	BT_DBG("%s", hdev->name);
1374 
1375 	if (!test_bit(HCI_RAW, &hdev->flags)) {
1376 		/* ACL tx timeout must be longer than maximum
1377 		 * link supervision timeout (40.9 seconds) */
1378 		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1379 			hci_acl_tx_to(hdev);
1380 	}
1381 
1382 	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1383 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1384 			BT_DBG("skb %p len %d", skb, skb->len);
1385 
1386 			hci_conn_enter_active_mode(conn);
1387 
1388 			hci_send_frame(skb);
1389 			hdev->acl_last_tx = jiffies;
1390 
1391 			hdev->acl_cnt--;
1392 			conn->sent++;
1393 		}
1394 	}
1395 }
1396 
1397 /* Schedule SCO */
1398 static inline void hci_sched_sco(struct hci_dev *hdev)
1399 {
1400 	struct hci_conn *conn;
1401 	struct sk_buff *skb;
1402 	int quote;
1403 
1404 	BT_DBG("%s", hdev->name);
1405 
1406 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1407 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1408 			BT_DBG("skb %p len %d", skb, skb->len);
1409 			hci_send_frame(skb);
1410 
1411 			conn->sent++;
1412 			if (conn->sent == ~0)
1413 				conn->sent = 0;
1414 		}
1415 	}
1416 }
1417 
1418 static inline void hci_sched_esco(struct hci_dev *hdev)
1419 {
1420 	struct hci_conn *conn;
1421 	struct sk_buff *skb;
1422 	int quote;
1423 
1424 	BT_DBG("%s", hdev->name);
1425 
1426 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1427 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1428 			BT_DBG("skb %p len %d", skb, skb->len);
1429 			hci_send_frame(skb);
1430 
1431 			conn->sent++;
1432 			if (conn->sent == ~0)
1433 				conn->sent = 0;
1434 		}
1435 	}
1436 }
1437 
1438 static void hci_tx_task(unsigned long arg)
1439 {
1440 	struct hci_dev *hdev = (struct hci_dev *) arg;
1441 	struct sk_buff *skb;
1442 
1443 	read_lock(&hci_task_lock);
1444 
1445 	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1446 
1447 	/* Schedule queues and send stuff to HCI driver */
1448 
1449 	hci_sched_acl(hdev);
1450 
1451 	hci_sched_sco(hdev);
1452 
1453 	hci_sched_esco(hdev);
1454 
1455 	/* Send next queued raw (unknown type) packet */
1456 	while ((skb = skb_dequeue(&hdev->raw_q)))
1457 		hci_send_frame(skb);
1458 
1459 	read_unlock(&hci_task_lock);
1460 }
1461 
1462 /* ----- HCI RX task (incoming data processing) ----- */
1463 
1464 /* ACL data packet */
1465 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1466 {
1467 	struct hci_acl_hdr *hdr = (void *) skb->data;
1468 	struct hci_conn *conn;
1469 	__u16 handle, flags;
1470 
1471 	skb_pull(skb, HCI_ACL_HDR_SIZE);
1472 
1473 	handle = __le16_to_cpu(hdr->handle);
1474 	flags  = hci_flags(handle);
1475 	handle = hci_handle(handle);
1476 
1477 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1478 
1479 	hdev->stat.acl_rx++;
1480 
1481 	hci_dev_lock(hdev);
1482 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1483 	hci_dev_unlock(hdev);
1484 
1485 	if (conn) {
1486 		register struct hci_proto *hp;
1487 
1488 		hci_conn_enter_active_mode(conn);
1489 
1490 		/* Send to upper protocol */
1491 		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1492 			hp->recv_acldata(conn, skb, flags);
1493 			return;
1494 		}
1495 	} else {
1496 		BT_ERR("%s ACL packet for unknown connection handle %d",
1497 			hdev->name, handle);
1498 	}
1499 
1500 	kfree_skb(skb);
1501 }
1502 
1503 /* SCO data packet */
1504 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1505 {
1506 	struct hci_sco_hdr *hdr = (void *) skb->data;
1507 	struct hci_conn *conn;
1508 	__u16 handle;
1509 
1510 	skb_pull(skb, HCI_SCO_HDR_SIZE);
1511 
1512 	handle = __le16_to_cpu(hdr->handle);
1513 
1514 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1515 
1516 	hdev->stat.sco_rx++;
1517 
1518 	hci_dev_lock(hdev);
1519 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1520 	hci_dev_unlock(hdev);
1521 
1522 	if (conn) {
1523 		register struct hci_proto *hp;
1524 
1525 		/* Send to upper protocol */
1526 		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1527 			hp->recv_scodata(conn, skb);
1528 			return;
1529 		}
1530 	} else {
1531 		BT_ERR("%s SCO packet for unknown connection handle %d",
1532 			hdev->name, handle);
1533 	}
1534 
1535 	kfree_skb(skb);
1536 }
1537 
1538 static void hci_rx_task(unsigned long arg)
1539 {
1540 	struct hci_dev *hdev = (struct hci_dev *) arg;
1541 	struct sk_buff *skb;
1542 
1543 	BT_DBG("%s", hdev->name);
1544 
1545 	read_lock(&hci_task_lock);
1546 
1547 	while ((skb = skb_dequeue(&hdev->rx_q))) {
1548 		if (atomic_read(&hdev->promisc)) {
1549 			/* Send copy to the sockets */
1550 			hci_send_to_sock(hdev, skb);
1551 		}
1552 
1553 		if (test_bit(HCI_RAW, &hdev->flags)) {
1554 			kfree_skb(skb);
1555 			continue;
1556 		}
1557 
1558 		if (test_bit(HCI_INIT, &hdev->flags)) {
1559 			/* Don't process data packets in this state. */
1560 			switch (bt_cb(skb)->pkt_type) {
1561 			case HCI_ACLDATA_PKT:
1562 			case HCI_SCODATA_PKT:
1563 				kfree_skb(skb);
1564 				continue;
1565 			}
1566 		}
1567 
1568 		/* Process frame */
1569 		switch (bt_cb(skb)->pkt_type) {
1570 		case HCI_EVENT_PKT:
1571 			hci_event_packet(hdev, skb);
1572 			break;
1573 
1574 		case HCI_ACLDATA_PKT:
1575 			BT_DBG("%s ACL data packet", hdev->name);
1576 			hci_acldata_packet(hdev, skb);
1577 			break;
1578 
1579 		case HCI_SCODATA_PKT:
1580 			BT_DBG("%s SCO data packet", hdev->name);
1581 			hci_scodata_packet(hdev, skb);
1582 			break;
1583 
1584 		default:
1585 			kfree_skb(skb);
1586 			break;
1587 		}
1588 	}
1589 
1590 	read_unlock(&hci_task_lock);
1591 }
1592 
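/*
 * Command flow control: hdev->cmd_cnt counts how many commands the
 * controller is currently willing to accept.  It is decremented in
 * hci_cmd_task() below for every command handed to the driver and
 * replenished by the event handler (hci_event.c) when Command Complete /
 * Command Status events report free command slots; if no credit arrives
 * within a second, the counter is forcibly reset.
 */
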
1593 static void hci_cmd_task(unsigned long arg)
1594 {
1595 	struct hci_dev *hdev = (struct hci_dev *) arg;
1596 	struct sk_buff *skb;
1597 
1598 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1599 
1600 	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1601 		BT_ERR("%s command tx timeout", hdev->name);
1602 		atomic_set(&hdev->cmd_cnt, 1);
1603 	}
1604 
1605 	/* Send queued commands */
1606 	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1607 		kfree_skb(hdev->sent_cmd);
1608 
1609 		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1610 			atomic_dec(&hdev->cmd_cnt);
1611 			hci_send_frame(skb);
1612 			hdev->cmd_last_tx = jiffies;
1613 		} else {
1614 			skb_queue_head(&hdev->cmd_q, skb);
1615 			hci_sched_cmd(hdev);
1616 		}
1617 	}
1618 }
1619