xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 7490ca1e)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31 
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48 
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52 
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55 
56 #define AUTO_OFF_TIMEOUT 2000
57 
58 bool enable_hs;
59 
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63 
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
67 
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
71 
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74 
75 /* ---- HCI notifications ---- */
76 
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79 	return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81 
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86 
87 static void hci_notify(struct hci_dev *hdev, int event)
88 {
89 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 }
91 
92 /* ---- HCI requests ---- */
93 
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97 
98 	/* If this is the init phase check if the completed command matches
99 	 * the last init command, and if not just return.
100 	 */
101 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 		return;
103 
104 	if (hdev->req_status == HCI_REQ_PEND) {
105 		hdev->req_result = result;
106 		hdev->req_status = HCI_REQ_DONE;
107 		wake_up_interruptible(&hdev->req_wait_q);
108 	}
109 }
110 
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
112 {
113 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
114 
115 	if (hdev->req_status == HCI_REQ_PEND) {
116 		hdev->req_result = err;
117 		hdev->req_status = HCI_REQ_CANCELED;
118 		wake_up_interruptible(&hdev->req_wait_q);
119 	}
120 }
121 
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 					unsigned long opt, __u32 timeout)
125 {
126 	DECLARE_WAITQUEUE(wait, current);
127 	int err = 0;
128 
129 	BT_DBG("%s start", hdev->name);
130 
131 	hdev->req_status = HCI_REQ_PEND;
132 
133 	add_wait_queue(&hdev->req_wait_q, &wait);
134 	set_current_state(TASK_INTERRUPTIBLE);
135 
136 	req(hdev, opt);
137 	schedule_timeout(timeout);
138 
139 	remove_wait_queue(&hdev->req_wait_q, &wait);
140 
141 	if (signal_pending(current))
142 		return -EINTR;
143 
144 	switch (hdev->req_status) {
145 	case HCI_REQ_DONE:
146 		err = -bt_to_errno(hdev->req_result);
147 		break;
148 
149 	case HCI_REQ_CANCELED:
150 		err = -hdev->req_result;
151 		break;
152 
153 	default:
154 		err = -ETIMEDOUT;
155 		break;
156 	}
157 
158 	hdev->req_status = hdev->req_result = 0;
159 
160 	BT_DBG("%s end: err %d", hdev->name, err);
161 
162 	return err;
163 }
164 
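/* Serialized wrapper around __hci_request(): returns -ENETDOWN if the
 * device is not up, otherwise runs the request under the request lock. */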
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 					unsigned long opt, __u32 timeout)
167 {
168 	int ret;
169 
170 	if (!test_bit(HCI_UP, &hdev->flags))
171 		return -ENETDOWN;
172 
173 	/* Serialize all requests */
174 	hci_req_lock(hdev);
175 	ret = __hci_request(hdev, req, opt, timeout);
176 	hci_req_unlock(hdev);
177 
178 	return ret;
179 }
180 
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182 {
183 	BT_DBG("%s %ld", hdev->name, opt);
184 
185 	/* Reset device */
186 	set_bit(HCI_RESET, &hdev->flags);
187 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189 
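/* Send the mandatory and optional HCI init commands for a BR/EDR
 * controller: reset, feature/version/buffer/address/class/name/voice
 * reads, event filter clear, connection accept timeout and stored
 * link key cleanup. */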
190 static void bredr_init(struct hci_dev *hdev)
191 {
192 	struct hci_cp_delete_stored_link_key cp;
193 	__le16 param;
194 	__u8 flt_type;
195 
196 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197 
198 	/* Mandatory initialization */
199 
200 	/* Reset */
201 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 		set_bit(HCI_RESET, &hdev->flags);
203 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
204 	}
205 
206 	/* Read Local Supported Features */
207 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
208 
209 	/* Read Local Version */
210 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211 
212 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
214 
215 	/* Read BD Address */
216 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217 
218 	/* Read Class of Device */
219 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220 
221 	/* Read Local Name */
222 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
223 
224 	/* Read Voice Setting */
225 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
226 
227 	/* Optional initialization */
228 
229 	/* Clear Event Filters */
230 	flt_type = HCI_FLT_CLEAR_ALL;
231 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
232 
233 	/* Connection accept timeout ~20 secs */
234 	param = cpu_to_le16(0x7d00);
235 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
236 
237 	bacpy(&cp.bdaddr, BDADDR_ANY);
238 	cp.delete_all = 1;
239 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
240 }
241 
242 static void amp_init(struct hci_dev *hdev)
243 {
244 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245 
246 	/* Reset */
247 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248 
249 	/* Read Local Version */
250 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251 }
252 
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254 {
255 	struct sk_buff *skb;
256 
257 	BT_DBG("%s %ld", hdev->name, opt);
258 
259 	/* Driver initialization */
260 
261 	/* Special commands */
262 	while ((skb = skb_dequeue(&hdev->driver_init))) {
263 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 		skb->dev = (void *) hdev;
265 
266 		skb_queue_tail(&hdev->cmd_q, skb);
267 		queue_work(hdev->workqueue, &hdev->cmd_work);
268 	}
269 	skb_queue_purge(&hdev->driver_init);
270 
271 	switch (hdev->dev_type) {
272 	case HCI_BREDR:
273 		bredr_init(hdev);
274 		break;
275 
276 	case HCI_AMP:
277 		amp_init(hdev);
278 		break;
279 
280 	default:
281 		BT_ERR("Unknown device type %d", hdev->dev_type);
282 		break;
283 	}
284 
285 }
286 
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288 {
289 	BT_DBG("%s", hdev->name);
290 
291 	/* Read LE buffer size */
292 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293 }
294 
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 	__u8 scan = opt;
298 
299 	BT_DBG("%s %x", hdev->name, scan);
300 
301 	/* Inquiry and Page scans */
302 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303 }
304 
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306 {
307 	__u8 auth = opt;
308 
309 	BT_DBG("%s %x", hdev->name, auth);
310 
311 	/* Authentication */
312 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
313 }
314 
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316 {
317 	__u8 encrypt = opt;
318 
319 	BT_DBG("%s %x", hdev->name, encrypt);
320 
321 	/* Encryption */
322 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
323 }
324 
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326 {
327 	__le16 policy = cpu_to_le16(opt);
328 
329 	BT_DBG("%s %x", hdev->name, policy);
330 
331 	/* Default link policy */
332 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333 }
334 
335 /* Get HCI device by index.
336  * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
338 {
339 	struct hci_dev *hdev = NULL, *d;
340 
341 	BT_DBG("%d", index);
342 
343 	if (index < 0)
344 		return NULL;
345 
346 	read_lock(&hci_dev_list_lock);
347 	list_for_each_entry(d, &hci_dev_list, list) {
348 		if (d->id == index) {
349 			hdev = hci_dev_hold(d);
350 			break;
351 		}
352 	}
353 	read_unlock(&hci_dev_list_lock);
354 	return hdev;
355 }
356 
357 /* ---- Inquiry support ---- */
358 static void inquiry_cache_flush(struct hci_dev *hdev)
359 {
360 	struct inquiry_cache *cache = &hdev->inq_cache;
361 	struct inquiry_entry *next  = cache->list, *e;
362 
363 	BT_DBG("cache %p", cache);
364 
365 	cache->list = NULL;
366 	while ((e = next)) {
367 		next = e->next;
368 		kfree(e);
369 	}
370 }
371 
372 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373 {
374 	struct inquiry_cache *cache = &hdev->inq_cache;
375 	struct inquiry_entry *e;
376 
377 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378 
379 	for (e = cache->list; e; e = e->next)
380 		if (!bacmp(&e->data.bdaddr, bdaddr))
381 			break;
382 	return e;
383 }
384 
385 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
386 {
387 	struct inquiry_cache *cache = &hdev->inq_cache;
388 	struct inquiry_entry *ie;
389 
390 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
391 
392 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
393 	if (!ie) {
394 		/* Entry not in the cache. Add new one. */
395 		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
396 		if (!ie)
397 			return;
398 
399 		ie->next = cache->list;
400 		cache->list = ie;
401 	}
402 
403 	memcpy(&ie->data, data, sizeof(*data));
404 	ie->timestamp = jiffies;
405 	cache->timestamp = jiffies;
406 }
407 
408 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
409 {
410 	struct inquiry_cache *cache = &hdev->inq_cache;
411 	struct inquiry_info *info = (struct inquiry_info *) buf;
412 	struct inquiry_entry *e;
413 	int copied = 0;
414 
415 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
416 		struct inquiry_data *data = &e->data;
417 		bacpy(&info->bdaddr, &data->bdaddr);
418 		info->pscan_rep_mode	= data->pscan_rep_mode;
419 		info->pscan_period_mode	= data->pscan_period_mode;
420 		info->pscan_mode	= data->pscan_mode;
421 		memcpy(info->dev_class, data->dev_class, 3);
422 		info->clock_offset	= data->clock_offset;
423 		info++;
424 	}
425 
426 	BT_DBG("cache %p, copied %d", cache, copied);
427 	return copied;
428 }
429 
430 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
431 {
432 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
433 	struct hci_cp_inquiry cp;
434 
435 	BT_DBG("%s", hdev->name);
436 
437 	if (test_bit(HCI_INQUIRY, &hdev->flags))
438 		return;
439 
440 	/* Start Inquiry */
441 	memcpy(&cp.lap, &ir->lap, 3);
442 	cp.length  = ir->length;
443 	cp.num_rsp = ir->num_rsp;
444 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
445 }
446 
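/* Handle an inquiry request from user space: flush the inquiry cache and
 * start a new inquiry if needed, then copy the cached results back to the
 * caller's buffer. */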
447 int hci_inquiry(void __user *arg)
448 {
449 	__u8 __user *ptr = arg;
450 	struct hci_inquiry_req ir;
451 	struct hci_dev *hdev;
452 	int err = 0, do_inquiry = 0, max_rsp;
453 	long timeo;
454 	__u8 *buf;
455 
456 	if (copy_from_user(&ir, ptr, sizeof(ir)))
457 		return -EFAULT;
458 
459 	hdev = hci_dev_get(ir.dev_id);
460 	if (!hdev)
461 		return -ENODEV;
462 
463 	hci_dev_lock(hdev);
464 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
465 				inquiry_cache_empty(hdev) ||
466 				ir.flags & IREQ_CACHE_FLUSH) {
467 		inquiry_cache_flush(hdev);
468 		do_inquiry = 1;
469 	}
470 	hci_dev_unlock(hdev);
471 
472 	timeo = ir.length * msecs_to_jiffies(2000);
473 
474 	if (do_inquiry) {
475 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
476 		if (err < 0)
477 			goto done;
478 	}
479 
480 	/* For an unlimited number of responses, use a buffer with 255 entries */
481 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
482 
483 	/* cache_dump can't sleep, so allocate a temporary buffer and then
484 	 * copy it to user space.
485 	 */
486 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
487 	if (!buf) {
488 		err = -ENOMEM;
489 		goto done;
490 	}
491 
492 	hci_dev_lock(hdev);
493 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
494 	hci_dev_unlock(hdev);
495 
496 	BT_DBG("num_rsp %d", ir.num_rsp);
497 
498 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
499 		ptr += sizeof(ir);
500 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
501 					ir.num_rsp))
502 			err = -EFAULT;
503 	} else
504 		err = -EFAULT;
505 
506 	kfree(buf);
507 
508 done:
509 	hci_dev_put(hdev);
510 	return err;
511 }
512 
513 /* ---- HCI ioctl helpers ---- */
514 
515 int hci_dev_open(__u16 dev)
516 {
517 	struct hci_dev *hdev;
518 	int ret = 0;
519 
520 	hdev = hci_dev_get(dev);
521 	if (!hdev)
522 		return -ENODEV;
523 
524 	BT_DBG("%s %p", hdev->name, hdev);
525 
526 	hci_req_lock(hdev);
527 
528 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
529 		ret = -ERFKILL;
530 		goto done;
531 	}
532 
533 	if (test_bit(HCI_UP, &hdev->flags)) {
534 		ret = -EALREADY;
535 		goto done;
536 	}
537 
538 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
539 		set_bit(HCI_RAW, &hdev->flags);
540 
541 	/* Treat all non-BR/EDR controllers as raw devices if
542 	   enable_hs is not set */
543 	if (hdev->dev_type != HCI_BREDR && !enable_hs)
544 		set_bit(HCI_RAW, &hdev->flags);
545 
546 	if (hdev->open(hdev)) {
547 		ret = -EIO;
548 		goto done;
549 	}
550 
551 	if (!test_bit(HCI_RAW, &hdev->flags)) {
552 		atomic_set(&hdev->cmd_cnt, 1);
553 		set_bit(HCI_INIT, &hdev->flags);
554 		hdev->init_last_cmd = 0;
555 
556 		ret = __hci_request(hdev, hci_init_req, 0,
557 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
558 
559 		if (lmp_host_le_capable(hdev))
560 			ret = __hci_request(hdev, hci_le_init_req, 0,
561 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
562 
563 		clear_bit(HCI_INIT, &hdev->flags);
564 	}
565 
566 	if (!ret) {
567 		hci_dev_hold(hdev);
568 		set_bit(HCI_UP, &hdev->flags);
569 		hci_notify(hdev, HCI_DEV_UP);
570 		if (!test_bit(HCI_SETUP, &hdev->flags)) {
571 			hci_dev_lock(hdev);
572 			mgmt_powered(hdev, 1);
573 			hci_dev_unlock(hdev);
574 		}
575 	} else {
576 		/* Init failed, cleanup */
577 		flush_work(&hdev->tx_work);
578 		flush_work(&hdev->cmd_work);
579 		flush_work(&hdev->rx_work);
580 
581 		skb_queue_purge(&hdev->cmd_q);
582 		skb_queue_purge(&hdev->rx_q);
583 
584 		if (hdev->flush)
585 			hdev->flush(hdev);
586 
587 		if (hdev->sent_cmd) {
588 			kfree_skb(hdev->sent_cmd);
589 			hdev->sent_cmd = NULL;
590 		}
591 
592 		hdev->close(hdev);
593 		hdev->flags = 0;
594 	}
595 
596 done:
597 	hci_req_unlock(hdev);
598 	hci_dev_put(hdev);
599 	return ret;
600 }
601 
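/* Common power-down path: cancel pending requests and delayed work, flush
 * the RX/TX/cmd queues, reset the controller (unless it is a raw device)
 * and finally call the driver's close callback. */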
602 static int hci_dev_do_close(struct hci_dev *hdev)
603 {
604 	BT_DBG("%s %p", hdev->name, hdev);
605 
606 	hci_req_cancel(hdev, ENODEV);
607 	hci_req_lock(hdev);
608 
609 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
610 		del_timer_sync(&hdev->cmd_timer);
611 		hci_req_unlock(hdev);
612 		return 0;
613 	}
614 
615 	/* Flush RX and TX works */
616 	flush_work(&hdev->tx_work);
617 	flush_work(&hdev->rx_work);
618 
619 	if (hdev->discov_timeout > 0) {
620 		cancel_delayed_work(&hdev->discov_off);
621 		hdev->discov_timeout = 0;
622 	}
623 
624 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
625 		cancel_delayed_work(&hdev->power_off);
626 
627 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
628 		cancel_delayed_work(&hdev->service_cache);
629 
630 	hci_dev_lock(hdev);
631 	inquiry_cache_flush(hdev);
632 	hci_conn_hash_flush(hdev);
633 	hci_dev_unlock(hdev);
634 
635 	hci_notify(hdev, HCI_DEV_DOWN);
636 
637 	if (hdev->flush)
638 		hdev->flush(hdev);
639 
640 	/* Reset device */
641 	skb_queue_purge(&hdev->cmd_q);
642 	atomic_set(&hdev->cmd_cnt, 1);
643 	if (!test_bit(HCI_RAW, &hdev->flags)) {
644 		set_bit(HCI_INIT, &hdev->flags);
645 		__hci_request(hdev, hci_reset_req, 0,
646 					msecs_to_jiffies(250));
647 		clear_bit(HCI_INIT, &hdev->flags);
648 	}
649 
650 	/* Flush cmd work */
651 	flush_work(&hdev->cmd_work);
652 
653 	/* Drop queues */
654 	skb_queue_purge(&hdev->rx_q);
655 	skb_queue_purge(&hdev->cmd_q);
656 	skb_queue_purge(&hdev->raw_q);
657 
658 	/* Drop last sent command */
659 	if (hdev->sent_cmd) {
660 		del_timer_sync(&hdev->cmd_timer);
661 		kfree_skb(hdev->sent_cmd);
662 		hdev->sent_cmd = NULL;
663 	}
664 
665 	/* After this point our queues are empty
666 	 * and no tasks are scheduled. */
667 	hdev->close(hdev);
668 
669 	hci_dev_lock(hdev);
670 	mgmt_powered(hdev, 0);
671 	hci_dev_unlock(hdev);
672 
673 	/* Clear flags */
674 	hdev->flags = 0;
675 
676 	hci_req_unlock(hdev);
677 
678 	hci_dev_put(hdev);
679 	return 0;
680 }
681 
682 int hci_dev_close(__u16 dev)
683 {
684 	struct hci_dev *hdev;
685 	int err;
686 
687 	hdev = hci_dev_get(dev);
688 	if (!hdev)
689 		return -ENODEV;
690 	err = hci_dev_do_close(hdev);
691 	hci_dev_put(hdev);
692 	return err;
693 }
694 
695 int hci_dev_reset(__u16 dev)
696 {
697 	struct hci_dev *hdev;
698 	int ret = 0;
699 
700 	hdev = hci_dev_get(dev);
701 	if (!hdev)
702 		return -ENODEV;
703 
704 	hci_req_lock(hdev);
705 
706 	if (!test_bit(HCI_UP, &hdev->flags))
707 		goto done;
708 
709 	/* Drop queues */
710 	skb_queue_purge(&hdev->rx_q);
711 	skb_queue_purge(&hdev->cmd_q);
712 
713 	hci_dev_lock(hdev);
714 	inquiry_cache_flush(hdev);
715 	hci_conn_hash_flush(hdev);
716 	hci_dev_unlock(hdev);
717 
718 	if (hdev->flush)
719 		hdev->flush(hdev);
720 
721 	atomic_set(&hdev->cmd_cnt, 1);
722 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
723 
724 	if (!test_bit(HCI_RAW, &hdev->flags))
725 		ret = __hci_request(hdev, hci_reset_req, 0,
726 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
727 
728 done:
729 	hci_req_unlock(hdev);
730 	hci_dev_put(hdev);
731 	return ret;
732 }
733 
734 int hci_dev_reset_stat(__u16 dev)
735 {
736 	struct hci_dev *hdev;
737 	int ret = 0;
738 
739 	hdev = hci_dev_get(dev);
740 	if (!hdev)
741 		return -ENODEV;
742 
743 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
744 
745 	hci_dev_put(hdev);
746 
747 	return ret;
748 }
749 
750 int hci_dev_cmd(unsigned int cmd, void __user *arg)
751 {
752 	struct hci_dev *hdev;
753 	struct hci_dev_req dr;
754 	int err = 0;
755 
756 	if (copy_from_user(&dr, arg, sizeof(dr)))
757 		return -EFAULT;
758 
759 	hdev = hci_dev_get(dr.dev_id);
760 	if (!hdev)
761 		return -ENODEV;
762 
763 	switch (cmd) {
764 	case HCISETAUTH:
765 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
766 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
767 		break;
768 
769 	case HCISETENCRYPT:
770 		if (!lmp_encrypt_capable(hdev)) {
771 			err = -EOPNOTSUPP;
772 			break;
773 		}
774 
775 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
776 			/* Auth must be enabled first */
777 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
778 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
779 			if (err)
780 				break;
781 		}
782 
783 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
784 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
785 		break;
786 
787 	case HCISETSCAN:
788 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
789 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
790 		break;
791 
792 	case HCISETLINKPOL:
793 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
794 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
795 		break;
796 
797 	case HCISETLINKMODE:
798 		hdev->link_mode = ((__u16) dr.dev_opt) &
799 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
800 		break;
801 
802 	case HCISETPTYPE:
803 		hdev->pkt_type = (__u16) dr.dev_opt;
804 		break;
805 
806 	case HCISETACLMTU:
807 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
808 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
809 		break;
810 
811 	case HCISETSCOMTU:
812 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
813 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
814 		break;
815 
816 	default:
817 		err = -EINVAL;
818 		break;
819 	}
820 
821 	hci_dev_put(hdev);
822 	return err;
823 }
824 
825 int hci_get_dev_list(void __user *arg)
826 {
827 	struct hci_dev *hdev;
828 	struct hci_dev_list_req *dl;
829 	struct hci_dev_req *dr;
830 	int n = 0, size, err;
831 	__u16 dev_num;
832 
833 	if (get_user(dev_num, (__u16 __user *) arg))
834 		return -EFAULT;
835 
836 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
837 		return -EINVAL;
838 
839 	size = sizeof(*dl) + dev_num * sizeof(*dr);
840 
841 	dl = kzalloc(size, GFP_KERNEL);
842 	if (!dl)
843 		return -ENOMEM;
844 
845 	dr = dl->dev_req;
846 
847 	read_lock(&hci_dev_list_lock);
848 	list_for_each_entry(hdev, &hci_dev_list, list) {
849 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
850 			cancel_delayed_work(&hdev->power_off);
851 
852 		if (!test_bit(HCI_MGMT, &hdev->flags))
853 			set_bit(HCI_PAIRABLE, &hdev->flags);
854 
855 		(dr + n)->dev_id  = hdev->id;
856 		(dr + n)->dev_opt = hdev->flags;
857 
858 		if (++n >= dev_num)
859 			break;
860 	}
861 	read_unlock(&hci_dev_list_lock);
862 
863 	dl->dev_num = n;
864 	size = sizeof(*dl) + n * sizeof(*dr);
865 
866 	err = copy_to_user(arg, dl, size);
867 	kfree(dl);
868 
869 	return err ? -EFAULT : 0;
870 }
871 
872 int hci_get_dev_info(void __user *arg)
873 {
874 	struct hci_dev *hdev;
875 	struct hci_dev_info di;
876 	int err = 0;
877 
878 	if (copy_from_user(&di, arg, sizeof(di)))
879 		return -EFAULT;
880 
881 	hdev = hci_dev_get(di.dev_id);
882 	if (!hdev)
883 		return -ENODEV;
884 
885 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
886 		cancel_delayed_work_sync(&hdev->power_off);
887 
888 	if (!test_bit(HCI_MGMT, &hdev->flags))
889 		set_bit(HCI_PAIRABLE, &hdev->flags);
890 
891 	strcpy(di.name, hdev->name);
892 	di.bdaddr   = hdev->bdaddr;
893 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
894 	di.flags    = hdev->flags;
895 	di.pkt_type = hdev->pkt_type;
896 	di.acl_mtu  = hdev->acl_mtu;
897 	di.acl_pkts = hdev->acl_pkts;
898 	di.sco_mtu  = hdev->sco_mtu;
899 	di.sco_pkts = hdev->sco_pkts;
900 	di.link_policy = hdev->link_policy;
901 	di.link_mode   = hdev->link_mode;
902 
903 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
904 	memcpy(&di.features, &hdev->features, sizeof(di.features));
905 
906 	if (copy_to_user(arg, &di, sizeof(di)))
907 		err = -EFAULT;
908 
909 	hci_dev_put(hdev);
910 
911 	return err;
912 }
913 
914 /* ---- Interface to HCI drivers ---- */
915 
916 static int hci_rfkill_set_block(void *data, bool blocked)
917 {
918 	struct hci_dev *hdev = data;
919 
920 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
921 
922 	if (!blocked)
923 		return 0;
924 
925 	hci_dev_do_close(hdev);
926 
927 	return 0;
928 }
929 
930 static const struct rfkill_ops hci_rfkill_ops = {
931 	.set_block = hci_rfkill_set_block,
932 };
933 
934 /* Alloc HCI device */
935 struct hci_dev *hci_alloc_dev(void)
936 {
937 	struct hci_dev *hdev;
938 
939 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
940 	if (!hdev)
941 		return NULL;
942 
943 	hci_init_sysfs(hdev);
944 	skb_queue_head_init(&hdev->driver_init);
945 
946 	return hdev;
947 }
948 EXPORT_SYMBOL(hci_alloc_dev);
949 
950 /* Free HCI device */
951 void hci_free_dev(struct hci_dev *hdev)
952 {
953 	skb_queue_purge(&hdev->driver_init);
954 
955 	/* Will be freed via the device release callback */
956 	put_device(&hdev->dev);
957 }
958 EXPORT_SYMBOL(hci_free_dev);
959 
960 static void hci_power_on(struct work_struct *work)
961 {
962 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
963 
964 	BT_DBG("%s", hdev->name);
965 
966 	if (hci_dev_open(hdev->id) < 0)
967 		return;
968 
969 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
970 		schedule_delayed_work(&hdev->power_off,
971 					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
972 
973 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
974 		mgmt_index_added(hdev);
975 }
976 
977 static void hci_power_off(struct work_struct *work)
978 {
979 	struct hci_dev *hdev = container_of(work, struct hci_dev,
980 							power_off.work);
981 
982 	BT_DBG("%s", hdev->name);
983 
984 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
985 
986 	hci_dev_close(hdev->id);
987 }
988 
989 static void hci_discov_off(struct work_struct *work)
990 {
991 	struct hci_dev *hdev;
992 	u8 scan = SCAN_PAGE;
993 
994 	hdev = container_of(work, struct hci_dev, discov_off.work);
995 
996 	BT_DBG("%s", hdev->name);
997 
998 	hci_dev_lock(hdev);
999 
1000 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1001 
1002 	hdev->discov_timeout = 0;
1003 
1004 	hci_dev_unlock(hdev);
1005 }
1006 
1007 int hci_uuids_clear(struct hci_dev *hdev)
1008 {
1009 	struct list_head *p, *n;
1010 
1011 	list_for_each_safe(p, n, &hdev->uuids) {
1012 		struct bt_uuid *uuid;
1013 
1014 		uuid = list_entry(p, struct bt_uuid, list);
1015 
1016 		list_del(p);
1017 		kfree(uuid);
1018 	}
1019 
1020 	return 0;
1021 }
1022 
1023 int hci_link_keys_clear(struct hci_dev *hdev)
1024 {
1025 	struct list_head *p, *n;
1026 
1027 	list_for_each_safe(p, n, &hdev->link_keys) {
1028 		struct link_key *key;
1029 
1030 		key = list_entry(p, struct link_key, list);
1031 
1032 		list_del(p);
1033 		kfree(key);
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1040 {
1041 	struct link_key *k;
1042 
1043 	list_for_each_entry(k, &hdev->link_keys, list)
1044 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1045 			return k;
1046 
1047 	return NULL;
1048 }
1049 
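/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 for keys worth keeping (legacy keys, dedicated bonding) and 0
 * for keys that must be discarded (debug keys, no-bonding cases). */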
1050 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1051 						u8 key_type, u8 old_key_type)
1052 {
1053 	/* Legacy key */
1054 	if (key_type < 0x03)
1055 		return 1;
1056 
1057 	/* Debug keys are insecure so don't store them persistently */
1058 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1059 		return 0;
1060 
1061 	/* Changed combination key and there's no previous one */
1062 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1063 		return 0;
1064 
1065 	/* Security mode 3 case */
1066 	if (!conn)
1067 		return 1;
1068 
1069 	/* Neither the local nor the remote side had no-bonding as a requirement */
1070 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1071 		return 1;
1072 
1073 	/* Local side had dedicated bonding as requirement */
1074 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1075 		return 1;
1076 
1077 	/* Remote side had dedicated bonding as requirement */
1078 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1079 		return 1;
1080 
1081 	/* If none of the above criteria match, then don't store the key
1082 	 * persistently */
1083 	return 0;
1084 }
1085 
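/* Look up an SMP Long Term Key by its EDIV and Rand values. */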
1086 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1087 {
1088 	struct link_key *k;
1089 
1090 	list_for_each_entry(k, &hdev->link_keys, list) {
1091 		struct key_master_id *id;
1092 
1093 		if (k->type != HCI_LK_SMP_LTK)
1094 			continue;
1095 
1096 		if (k->dlen != sizeof(*id))
1097 			continue;
1098 
1099 		id = (void *) &k->data;
1100 		if (id->ediv == ediv &&
1101 				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1102 			return k;
1103 	}
1104 
1105 	return NULL;
1106 }
1107 EXPORT_SYMBOL(hci_find_ltk);
1108 
1109 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1110 					bdaddr_t *bdaddr, u8 type)
1111 {
1112 	struct link_key *k;
1113 
1114 	list_for_each_entry(k, &hdev->link_keys, list)
1115 		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1116 			return k;
1117 
1118 	return NULL;
1119 }
1120 EXPORT_SYMBOL(hci_find_link_key_type);
1121 
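/* Store or update a link key for the given remote address. For new keys
 * the management interface is notified and non-persistent keys are dropped
 * again after the notification. */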
1122 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1123 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1124 {
1125 	struct link_key *key, *old_key;
1126 	u8 old_key_type, persistent;
1127 
1128 	old_key = hci_find_link_key(hdev, bdaddr);
1129 	if (old_key) {
1130 		old_key_type = old_key->type;
1131 		key = old_key;
1132 	} else {
1133 		old_key_type = conn ? conn->key_type : 0xff;
1134 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1135 		if (!key)
1136 			return -ENOMEM;
1137 		list_add(&key->list, &hdev->link_keys);
1138 	}
1139 
1140 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1141 
1142 	/* Some buggy controller combinations generate a changed
1143 	 * combination key for legacy pairing even when there's no
1144 	 * previous key */
1145 	if (type == HCI_LK_CHANGED_COMBINATION &&
1146 					(!conn || conn->remote_auth == 0xff) &&
1147 					old_key_type == 0xff) {
1148 		type = HCI_LK_COMBINATION;
1149 		if (conn)
1150 			conn->key_type = type;
1151 	}
1152 
1153 	bacpy(&key->bdaddr, bdaddr);
1154 	memcpy(key->val, val, 16);
1155 	key->pin_len = pin_len;
1156 
1157 	if (type == HCI_LK_CHANGED_COMBINATION)
1158 		key->type = old_key_type;
1159 	else
1160 		key->type = type;
1161 
1162 	if (!new_key)
1163 		return 0;
1164 
1165 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1166 
1167 	mgmt_new_link_key(hdev, key, persistent);
1168 
1169 	if (!persistent) {
1170 		list_del(&key->list);
1171 		kfree(key);
1172 	}
1173 
1174 	return 0;
1175 }
1176 
1177 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1178 			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1179 {
1180 	struct link_key *key, *old_key;
1181 	struct key_master_id *id;
1182 	u8 old_key_type;
1183 
1184 	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1185 
1186 	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1187 	if (old_key) {
1188 		key = old_key;
1189 		old_key_type = old_key->type;
1190 	} else {
1191 		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1192 		if (!key)
1193 			return -ENOMEM;
1194 		list_add(&key->list, &hdev->link_keys);
1195 		old_key_type = 0xff;
1196 	}
1197 
1198 	key->dlen = sizeof(*id);
1199 
1200 	bacpy(&key->bdaddr, bdaddr);
1201 	memcpy(key->val, ltk, sizeof(key->val));
1202 	key->type = HCI_LK_SMP_LTK;
1203 	key->pin_len = key_size;
1204 
1205 	id = (void *) &key->data;
1206 	id->ediv = ediv;
1207 	memcpy(id->rand, rand, sizeof(id->rand));
1208 
1209 	if (new_key)
1210 		mgmt_new_link_key(hdev, key, old_key_type);
1211 
1212 	return 0;
1213 }
1214 
1215 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1216 {
1217 	struct link_key *key;
1218 
1219 	key = hci_find_link_key(hdev, bdaddr);
1220 	if (!key)
1221 		return -ENOENT;
1222 
1223 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1224 
1225 	list_del(&key->list);
1226 	kfree(key);
1227 
1228 	return 0;
1229 }
1230 
1231 /* HCI command timer function */
1232 static void hci_cmd_timer(unsigned long arg)
1233 {
1234 	struct hci_dev *hdev = (void *) arg;
1235 
1236 	BT_ERR("%s command tx timeout", hdev->name);
1237 	atomic_set(&hdev->cmd_cnt, 1);
1238 	queue_work(hdev->workqueue, &hdev->cmd_work);
1239 }
1240 
1241 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1242 							bdaddr_t *bdaddr)
1243 {
1244 	struct oob_data *data;
1245 
1246 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1247 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1248 			return data;
1249 
1250 	return NULL;
1251 }
1252 
1253 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1254 {
1255 	struct oob_data *data;
1256 
1257 	data = hci_find_remote_oob_data(hdev, bdaddr);
1258 	if (!data)
1259 		return -ENOENT;
1260 
1261 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1262 
1263 	list_del(&data->list);
1264 	kfree(data);
1265 
1266 	return 0;
1267 }
1268 
1269 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1270 {
1271 	struct oob_data *data, *n;
1272 
1273 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1274 		list_del(&data->list);
1275 		kfree(data);
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1282 								u8 *randomizer)
1283 {
1284 	struct oob_data *data;
1285 
1286 	data = hci_find_remote_oob_data(hdev, bdaddr);
1287 
1288 	if (!data) {
1289 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1290 		if (!data)
1291 			return -ENOMEM;
1292 
1293 		bacpy(&data->bdaddr, bdaddr);
1294 		list_add(&data->list, &hdev->remote_oob_data);
1295 	}
1296 
1297 	memcpy(data->hash, hash, sizeof(data->hash));
1298 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1299 
1300 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1301 
1302 	return 0;
1303 }
1304 
1305 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1306 						bdaddr_t *bdaddr)
1307 {
1308 	struct bdaddr_list *b;
1309 
1310 	list_for_each_entry(b, &hdev->blacklist, list)
1311 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1312 			return b;
1313 
1314 	return NULL;
1315 }
1316 
1317 int hci_blacklist_clear(struct hci_dev *hdev)
1318 {
1319 	struct list_head *p, *n;
1320 
1321 	list_for_each_safe(p, n, &hdev->blacklist) {
1322 		struct bdaddr_list *b;
1323 
1324 		b = list_entry(p, struct bdaddr_list, list);
1325 
1326 		list_del(p);
1327 		kfree(b);
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334 {
1335 	struct bdaddr_list *entry;
1336 
1337 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1338 		return -EBADF;
1339 
1340 	if (hci_blacklist_lookup(hdev, bdaddr))
1341 		return -EEXIST;
1342 
1343 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1344 	if (!entry)
1345 		return -ENOMEM;
1346 
1347 	bacpy(&entry->bdaddr, bdaddr);
1348 
1349 	list_add(&entry->list, &hdev->blacklist);
1350 
1351 	return mgmt_device_blocked(hdev, bdaddr);
1352 }
1353 
1354 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355 {
1356 	struct bdaddr_list *entry;
1357 
1358 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1359 		return hci_blacklist_clear(hdev);
1360 
1361 	entry = hci_blacklist_lookup(hdev, bdaddr);
1362 	if (!entry)
1363 		return -ENOENT;
1364 
1365 	list_del(&entry->list);
1366 	kfree(entry);
1367 
1368 	return mgmt_device_unblocked(hdev, bdaddr);
1369 }
1370 
1371 static void hci_clear_adv_cache(struct work_struct *work)
1372 {
1373 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1374 							adv_work.work);
1375 
1376 	hci_dev_lock(hdev);
1377 
1378 	hci_adv_entries_clear(hdev);
1379 
1380 	hci_dev_unlock(hdev);
1381 }
1382 
1383 int hci_adv_entries_clear(struct hci_dev *hdev)
1384 {
1385 	struct adv_entry *entry, *tmp;
1386 
1387 	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1388 		list_del(&entry->list);
1389 		kfree(entry);
1390 	}
1391 
1392 	BT_DBG("%s adv cache cleared", hdev->name);
1393 
1394 	return 0;
1395 }
1396 
1397 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1398 {
1399 	struct adv_entry *entry;
1400 
1401 	list_for_each_entry(entry, &hdev->adv_entries, list)
1402 		if (bacmp(bdaddr, &entry->bdaddr) == 0)
1403 			return entry;
1404 
1405 	return NULL;
1406 }
1407 
1408 static inline int is_connectable_adv(u8 evt_type)
1409 {
1410 	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1411 		return 1;
1412 
1413 	return 0;
1414 }
1415 
1416 int hci_add_adv_entry(struct hci_dev *hdev,
1417 					struct hci_ev_le_advertising_info *ev)
1418 {
1419 	struct adv_entry *entry;
1420 
1421 	if (!is_connectable_adv(ev->evt_type))
1422 		return -EINVAL;
1423 
1424 	/* Only new entries should be added to adv_entries. So, if
1425 	 * bdaddr was found, don't add it. */
1426 	if (hci_find_adv_entry(hdev, &ev->bdaddr))
1427 		return 0;
1428 
1429 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1430 	if (!entry)
1431 		return -ENOMEM;
1432 
1433 	bacpy(&entry->bdaddr, &ev->bdaddr);
1434 	entry->bdaddr_type = ev->bdaddr_type;
1435 
1436 	list_add(&entry->list, &hdev->adv_entries);
1437 
1438 	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1439 				batostr(&entry->bdaddr), entry->bdaddr_type);
1440 
1441 	return 0;
1442 }
1443 
1444 /* Register HCI device */
1445 int hci_register_dev(struct hci_dev *hdev)
1446 {
1447 	struct list_head *head = &hci_dev_list, *p;
1448 	int i, id, error;
1449 
1450 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1451 						hdev->bus, hdev->owner);
1452 
1453 	if (!hdev->open || !hdev->close || !hdev->destruct)
1454 		return -EINVAL;
1455 
1456 	/* Do not allow HCI_AMP devices to register at index 0,
1457 	 * so the index can be used as the AMP controller ID.
1458 	 */
1459 	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1460 
1461 	write_lock(&hci_dev_list_lock);
1462 
1463 	/* Find first available device id */
1464 	list_for_each(p, &hci_dev_list) {
1465 		if (list_entry(p, struct hci_dev, list)->id != id)
1466 			break;
1467 		head = p; id++;
1468 	}
1469 
1470 	sprintf(hdev->name, "hci%d", id);
1471 	hdev->id = id;
1472 	list_add_tail(&hdev->list, head);
1473 
1474 	atomic_set(&hdev->refcnt, 1);
1475 	mutex_init(&hdev->lock);
1476 
1477 	hdev->flags = 0;
1478 	hdev->dev_flags = 0;
1479 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1480 	hdev->esco_type = (ESCO_HV1);
1481 	hdev->link_mode = (HCI_LM_ACCEPT);
1482 	hdev->io_capability = 0x03; /* No Input No Output */
1483 
1484 	hdev->idle_timeout = 0;
1485 	hdev->sniff_max_interval = 800;
1486 	hdev->sniff_min_interval = 80;
1487 
1488 	INIT_WORK(&hdev->rx_work, hci_rx_work);
1489 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1490 	INIT_WORK(&hdev->tx_work, hci_tx_work);
1491 
1492 
1493 	skb_queue_head_init(&hdev->rx_q);
1494 	skb_queue_head_init(&hdev->cmd_q);
1495 	skb_queue_head_init(&hdev->raw_q);
1496 
1497 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1498 
1499 	for (i = 0; i < NUM_REASSEMBLY; i++)
1500 		hdev->reassembly[i] = NULL;
1501 
1502 	init_waitqueue_head(&hdev->req_wait_q);
1503 	mutex_init(&hdev->req_lock);
1504 
1505 	inquiry_cache_init(hdev);
1506 
1507 	hci_conn_hash_init(hdev);
1508 
1509 	INIT_LIST_HEAD(&hdev->mgmt_pending);
1510 
1511 	INIT_LIST_HEAD(&hdev->blacklist);
1512 
1513 	INIT_LIST_HEAD(&hdev->uuids);
1514 
1515 	INIT_LIST_HEAD(&hdev->link_keys);
1516 
1517 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1518 
1519 	INIT_LIST_HEAD(&hdev->adv_entries);
1520 
1521 	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1522 	INIT_WORK(&hdev->power_on, hci_power_on);
1523 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1524 
1525 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1526 
1527 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1528 
1529 	atomic_set(&hdev->promisc, 0);
1530 
1531 	write_unlock(&hci_dev_list_lock);
1532 
1533 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1534 							WQ_MEM_RECLAIM, 1);
1535 	if (!hdev->workqueue) {
1536 		error = -ENOMEM;
1537 		goto err;
1538 	}
1539 
1540 	error = hci_add_sysfs(hdev);
1541 	if (error < 0)
1542 		goto err_wqueue;
1543 
1544 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1545 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1546 	if (hdev->rfkill) {
1547 		if (rfkill_register(hdev->rfkill) < 0) {
1548 			rfkill_destroy(hdev->rfkill);
1549 			hdev->rfkill = NULL;
1550 		}
1551 	}
1552 
1553 	set_bit(HCI_AUTO_OFF, &hdev->flags);
1554 	set_bit(HCI_SETUP, &hdev->flags);
1555 	schedule_work(&hdev->power_on);
1556 
1557 	hci_notify(hdev, HCI_DEV_REG);
1558 
1559 	return id;
1560 
1561 err_wqueue:
1562 	destroy_workqueue(hdev->workqueue);
1563 err:
1564 	write_lock(&hci_dev_list_lock);
1565 	list_del(&hdev->list);
1566 	write_unlock(&hci_dev_list_lock);
1567 
1568 	return error;
1569 }
1570 EXPORT_SYMBOL(hci_register_dev);
1571 
1572 /* Unregister HCI device */
1573 void hci_unregister_dev(struct hci_dev *hdev)
1574 {
1575 	int i;
1576 
1577 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1578 
1579 	write_lock(&hci_dev_list_lock);
1580 	list_del(&hdev->list);
1581 	write_unlock(&hci_dev_list_lock);
1582 
1583 	hci_dev_do_close(hdev);
1584 
1585 	for (i = 0; i < NUM_REASSEMBLY; i++)
1586 		kfree_skb(hdev->reassembly[i]);
1587 
1588 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1589 					!test_bit(HCI_SETUP, &hdev->flags)) {
1590 		hci_dev_lock(hdev);
1591 		mgmt_index_removed(hdev);
1592 		hci_dev_unlock(hdev);
1593 	}
1594 
1595 	/* mgmt_index_removed should take care of emptying the
1596 	 * pending list */
1597 	BUG_ON(!list_empty(&hdev->mgmt_pending));
1598 
1599 	hci_notify(hdev, HCI_DEV_UNREG);
1600 
1601 	if (hdev->rfkill) {
1602 		rfkill_unregister(hdev->rfkill);
1603 		rfkill_destroy(hdev->rfkill);
1604 	}
1605 
1606 	hci_del_sysfs(hdev);
1607 
1608 	cancel_delayed_work_sync(&hdev->adv_work);
1609 
1610 	destroy_workqueue(hdev->workqueue);
1611 
1612 	hci_dev_lock(hdev);
1613 	hci_blacklist_clear(hdev);
1614 	hci_uuids_clear(hdev);
1615 	hci_link_keys_clear(hdev);
1616 	hci_remote_oob_data_clear(hdev);
1617 	hci_adv_entries_clear(hdev);
1618 	hci_dev_unlock(hdev);
1619 
1620 	__hci_dev_put(hdev);
1621 }
1622 EXPORT_SYMBOL(hci_unregister_dev);
1623 
1624 /* Suspend HCI device */
1625 int hci_suspend_dev(struct hci_dev *hdev)
1626 {
1627 	hci_notify(hdev, HCI_DEV_SUSPEND);
1628 	return 0;
1629 }
1630 EXPORT_SYMBOL(hci_suspend_dev);
1631 
1632 /* Resume HCI device */
1633 int hci_resume_dev(struct hci_dev *hdev)
1634 {
1635 	hci_notify(hdev, HCI_DEV_RESUME);
1636 	return 0;
1637 }
1638 EXPORT_SYMBOL(hci_resume_dev);
1639 
1640 /* Receive frame from HCI drivers */
1641 int hci_recv_frame(struct sk_buff *skb)
1642 {
1643 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1644 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1645 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1646 		kfree_skb(skb);
1647 		return -ENXIO;
1648 	}
1649 
1650 	/* Incoming skb */
1651 	bt_cb(skb)->incoming = 1;
1652 
1653 	/* Time stamp */
1654 	__net_timestamp(skb);
1655 
1656 	skb_queue_tail(&hdev->rx_q, skb);
1657 	queue_work(hdev->workqueue, &hdev->rx_work);
1658 
1659 	return 0;
1660 }
1661 EXPORT_SYMBOL(hci_recv_frame);
1662 
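/* Reassemble an HCI packet of the given type from a partial data chunk into
 * hdev->reassembly[index]. Returns the number of bytes left unprocessed, or
 * a negative error code. */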
1663 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1664 						  int count, __u8 index)
1665 {
1666 	int len = 0;
1667 	int hlen = 0;
1668 	int remain = count;
1669 	struct sk_buff *skb;
1670 	struct bt_skb_cb *scb;
1671 
1672 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1673 				index >= NUM_REASSEMBLY)
1674 		return -EILSEQ;
1675 
1676 	skb = hdev->reassembly[index];
1677 
1678 	if (!skb) {
1679 		switch (type) {
1680 		case HCI_ACLDATA_PKT:
1681 			len = HCI_MAX_FRAME_SIZE;
1682 			hlen = HCI_ACL_HDR_SIZE;
1683 			break;
1684 		case HCI_EVENT_PKT:
1685 			len = HCI_MAX_EVENT_SIZE;
1686 			hlen = HCI_EVENT_HDR_SIZE;
1687 			break;
1688 		case HCI_SCODATA_PKT:
1689 			len = HCI_MAX_SCO_SIZE;
1690 			hlen = HCI_SCO_HDR_SIZE;
1691 			break;
1692 		}
1693 
1694 		skb = bt_skb_alloc(len, GFP_ATOMIC);
1695 		if (!skb)
1696 			return -ENOMEM;
1697 
1698 		scb = (void *) skb->cb;
1699 		scb->expect = hlen;
1700 		scb->pkt_type = type;
1701 
1702 		skb->dev = (void *) hdev;
1703 		hdev->reassembly[index] = skb;
1704 	}
1705 
1706 	while (count) {
1707 		scb = (void *) skb->cb;
1708 		len = min(scb->expect, (__u16)count);
1709 
1710 		memcpy(skb_put(skb, len), data, len);
1711 
1712 		count -= len;
1713 		data += len;
1714 		scb->expect -= len;
1715 		remain = count;
1716 
1717 		switch (type) {
1718 		case HCI_EVENT_PKT:
1719 			if (skb->len == HCI_EVENT_HDR_SIZE) {
1720 				struct hci_event_hdr *h = hci_event_hdr(skb);
1721 				scb->expect = h->plen;
1722 
1723 				if (skb_tailroom(skb) < scb->expect) {
1724 					kfree_skb(skb);
1725 					hdev->reassembly[index] = NULL;
1726 					return -ENOMEM;
1727 				}
1728 			}
1729 			break;
1730 
1731 		case HCI_ACLDATA_PKT:
1732 			if (skb->len  == HCI_ACL_HDR_SIZE) {
1733 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
1734 				scb->expect = __le16_to_cpu(h->dlen);
1735 
1736 				if (skb_tailroom(skb) < scb->expect) {
1737 					kfree_skb(skb);
1738 					hdev->reassembly[index] = NULL;
1739 					return -ENOMEM;
1740 				}
1741 			}
1742 			break;
1743 
1744 		case HCI_SCODATA_PKT:
1745 			if (skb->len == HCI_SCO_HDR_SIZE) {
1746 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
1747 				scb->expect = h->dlen;
1748 
1749 				if (skb_tailroom(skb) < scb->expect) {
1750 					kfree_skb(skb);
1751 					hdev->reassembly[index] = NULL;
1752 					return -ENOMEM;
1753 				}
1754 			}
1755 			break;
1756 		}
1757 
1758 		if (scb->expect == 0) {
1759 			/* Complete frame */
1760 
1761 			bt_cb(skb)->pkt_type = type;
1762 			hci_recv_frame(skb);
1763 
1764 			hdev->reassembly[index] = NULL;
1765 			return remain;
1766 		}
1767 	}
1768 
1769 	return remain;
1770 }
1771 
1772 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1773 {
1774 	int rem = 0;
1775 
1776 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1777 		return -EILSEQ;
1778 
1779 	while (count) {
1780 		rem = hci_reassembly(hdev, type, data, count, type - 1);
1781 		if (rem < 0)
1782 			return rem;
1783 
1784 		data += (count - rem);
1785 		count = rem;
1786 	}
1787 
1788 	return rem;
1789 }
1790 EXPORT_SYMBOL(hci_recv_fragment);
1791 
1792 #define STREAM_REASSEMBLY 0
1793 
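/* Reassemble packets from a byte stream: the first byte of each frame
 * carries the packet type, the rest is fed to hci_reassembly(). */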
1794 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1795 {
1796 	int type;
1797 	int rem = 0;
1798 
1799 	while (count) {
1800 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1801 
1802 		if (!skb) {
1803 			struct { char type; } *pkt;
1804 
1805 			/* Start of the frame */
1806 			pkt = data;
1807 			type = pkt->type;
1808 
1809 			data++;
1810 			count--;
1811 		} else
1812 			type = bt_cb(skb)->pkt_type;
1813 
1814 		rem = hci_reassembly(hdev, type, data, count,
1815 							STREAM_REASSEMBLY);
1816 		if (rem < 0)
1817 			return rem;
1818 
1819 		data += (count - rem);
1820 		count = rem;
1821 	}
1822 
1823 	return rem;
1824 }
1825 EXPORT_SYMBOL(hci_recv_stream_fragment);
1826 
1827 /* ---- Interface to upper protocols ---- */
1828 
1829 int hci_register_cb(struct hci_cb *cb)
1830 {
1831 	BT_DBG("%p name %s", cb, cb->name);
1832 
1833 	write_lock(&hci_cb_list_lock);
1834 	list_add(&cb->list, &hci_cb_list);
1835 	write_unlock(&hci_cb_list_lock);
1836 
1837 	return 0;
1838 }
1839 EXPORT_SYMBOL(hci_register_cb);
1840 
1841 int hci_unregister_cb(struct hci_cb *cb)
1842 {
1843 	BT_DBG("%p name %s", cb, cb->name);
1844 
1845 	write_lock(&hci_cb_list_lock);
1846 	list_del(&cb->list);
1847 	write_unlock(&hci_cb_list_lock);
1848 
1849 	return 0;
1850 }
1851 EXPORT_SYMBOL(hci_unregister_cb);
1852 
1853 static int hci_send_frame(struct sk_buff *skb)
1854 {
1855 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1856 
1857 	if (!hdev) {
1858 		kfree_skb(skb);
1859 		return -ENODEV;
1860 	}
1861 
1862 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1863 
1864 	if (atomic_read(&hdev->promisc)) {
1865 		/* Time stamp */
1866 		__net_timestamp(skb);
1867 
1868 		hci_send_to_sock(hdev, skb, NULL);
1869 	}
1870 
1871 	/* Get rid of skb owner, prior to sending to the driver. */
1872 	skb_orphan(skb);
1873 
1874 	return hdev->send(skb);
1875 }
1876 
1877 /* Send HCI command */
1878 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1879 {
1880 	int len = HCI_COMMAND_HDR_SIZE + plen;
1881 	struct hci_command_hdr *hdr;
1882 	struct sk_buff *skb;
1883 
1884 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1885 
1886 	skb = bt_skb_alloc(len, GFP_ATOMIC);
1887 	if (!skb) {
1888 		BT_ERR("%s no memory for command", hdev->name);
1889 		return -ENOMEM;
1890 	}
1891 
1892 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1893 	hdr->opcode = cpu_to_le16(opcode);
1894 	hdr->plen   = plen;
1895 
1896 	if (plen)
1897 		memcpy(skb_put(skb, plen), param, plen);
1898 
1899 	BT_DBG("skb len %d", skb->len);
1900 
1901 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1902 	skb->dev = (void *) hdev;
1903 
1904 	if (test_bit(HCI_INIT, &hdev->flags))
1905 		hdev->init_last_cmd = opcode;
1906 
1907 	skb_queue_tail(&hdev->cmd_q, skb);
1908 	queue_work(hdev->workqueue, &hdev->cmd_work);
1909 
1910 	return 0;
1911 }
1912 
1913 /* Get data from the previously sent command */
1914 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1915 {
1916 	struct hci_command_hdr *hdr;
1917 
1918 	if (!hdev->sent_cmd)
1919 		return NULL;
1920 
1921 	hdr = (void *) hdev->sent_cmd->data;
1922 
1923 	if (hdr->opcode != cpu_to_le16(opcode))
1924 		return NULL;
1925 
1926 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1927 
1928 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1929 }
1930 
1931 /* Send ACL data */
1932 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1933 {
1934 	struct hci_acl_hdr *hdr;
1935 	int len = skb->len;
1936 
1937 	skb_push(skb, HCI_ACL_HDR_SIZE);
1938 	skb_reset_transport_header(skb);
1939 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1940 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1941 	hdr->dlen   = cpu_to_le16(len);
1942 }
1943 
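/* Queue an ACL frame on the given queue. Fragments hanging off frag_list
 * are queued atomically and marked as ACL continuation packets. */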
1944 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1945 				struct sk_buff *skb, __u16 flags)
1946 {
1947 	struct hci_dev *hdev = conn->hdev;
1948 	struct sk_buff *list;
1949 
1950 	list = skb_shinfo(skb)->frag_list;
1951 	if (!list) {
1952 		/* Non fragmented */
1953 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1954 
1955 		skb_queue_tail(queue, skb);
1956 	} else {
1957 		/* Fragmented */
1958 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1959 
1960 		skb_shinfo(skb)->frag_list = NULL;
1961 
1962 		/* Queue all fragments atomically */
1963 		spin_lock(&queue->lock);
1964 
1965 		__skb_queue_tail(queue, skb);
1966 
1967 		flags &= ~ACL_START;
1968 		flags |= ACL_CONT;
1969 		do {
1970 			skb = list; list = list->next;
1971 
1972 			skb->dev = (void *) hdev;
1973 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1974 			hci_add_acl_hdr(skb, conn->handle, flags);
1975 
1976 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1977 
1978 			__skb_queue_tail(queue, skb);
1979 		} while (list);
1980 
1981 		spin_unlock(&queue->lock);
1982 	}
1983 }
1984 
1985 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1986 {
1987 	struct hci_conn *conn = chan->conn;
1988 	struct hci_dev *hdev = conn->hdev;
1989 
1990 	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1991 
1992 	skb->dev = (void *) hdev;
1993 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1994 	hci_add_acl_hdr(skb, conn->handle, flags);
1995 
1996 	hci_queue_acl(conn, &chan->data_q, skb, flags);
1997 
1998 	queue_work(hdev->workqueue, &hdev->tx_work);
1999 }
2000 EXPORT_SYMBOL(hci_send_acl);
2001 
2002 /* Send SCO data */
2003 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2004 {
2005 	struct hci_dev *hdev = conn->hdev;
2006 	struct hci_sco_hdr hdr;
2007 
2008 	BT_DBG("%s len %d", hdev->name, skb->len);
2009 
2010 	hdr.handle = cpu_to_le16(conn->handle);
2011 	hdr.dlen   = skb->len;
2012 
2013 	skb_push(skb, HCI_SCO_HDR_SIZE);
2014 	skb_reset_transport_header(skb);
2015 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2016 
2017 	skb->dev = (void *) hdev;
2018 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2019 
2020 	skb_queue_tail(&conn->data_q, skb);
2021 	queue_work(hdev->workqueue, &hdev->tx_work);
2022 }
2023 EXPORT_SYMBOL(hci_send_sco);
2024 
2025 /* ---- HCI TX task (outgoing data) ---- */
2026 
2027 /* HCI Connection scheduler */
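/* Find the connection of the given link type with the least outstanding
 * packets and compute its fair share of the available controller buffers. */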
2028 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2029 {
2030 	struct hci_conn_hash *h = &hdev->conn_hash;
2031 	struct hci_conn *conn = NULL, *c;
2032 	int num = 0, min = ~0;
2033 
2034 	/* We don't have to lock device here. Connections are always
2035 	 * added and removed with TX task disabled. */
2036 
2037 	rcu_read_lock();
2038 
2039 	list_for_each_entry_rcu(c, &h->list, list) {
2040 		if (c->type != type || skb_queue_empty(&c->data_q))
2041 			continue;
2042 
2043 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2044 			continue;
2045 
2046 		num++;
2047 
2048 		if (c->sent < min) {
2049 			min  = c->sent;
2050 			conn = c;
2051 		}
2052 
2053 		if (hci_conn_num(hdev, type) == num)
2054 			break;
2055 	}
2056 
2057 	rcu_read_unlock();
2058 
2059 	if (conn) {
2060 		int cnt, q;
2061 
2062 		switch (conn->type) {
2063 		case ACL_LINK:
2064 			cnt = hdev->acl_cnt;
2065 			break;
2066 		case SCO_LINK:
2067 		case ESCO_LINK:
2068 			cnt = hdev->sco_cnt;
2069 			break;
2070 		case LE_LINK:
2071 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2072 			break;
2073 		default:
2074 			cnt = 0;
2075 			BT_ERR("Unknown link type");
2076 		}
2077 
2078 		q = cnt / num;
2079 		*quote = q ? q : 1;
2080 	} else
2081 		*quote = 0;
2082 
2083 	BT_DBG("conn %p quote %d", conn, *quote);
2084 	return conn;
2085 }
2086 
2087 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2088 {
2089 	struct hci_conn_hash *h = &hdev->conn_hash;
2090 	struct hci_conn *c;
2091 
2092 	BT_ERR("%s link tx timeout", hdev->name);
2093 
2094 	rcu_read_lock();
2095 
2096 	/* Kill stalled connections */
2097 	list_for_each_entry_rcu(c, &h->list, list) {
2098 		if (c->type == type && c->sent) {
2099 			BT_ERR("%s killing stalled connection %s",
2100 				hdev->name, batostr(&c->dst));
2101 			hci_acl_disconn(c, 0x13);
2102 		}
2103 	}
2104 
2105 	rcu_read_unlock();
2106 }
2107 
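/* Channel-aware scheduler helper: pick a channel carrying the
 * highest-priority queued data on the least-busy connection of the given
 * link type and compute its TX quote from the available buffer credits. */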
2108 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2109 						int *quote)
2110 {
2111 	struct hci_conn_hash *h = &hdev->conn_hash;
2112 	struct hci_chan *chan = NULL;
2113 	int num = 0, min = ~0, cur_prio = 0;
2114 	struct hci_conn *conn;
2115 	int cnt, q, conn_num = 0;
2116 
2117 	BT_DBG("%s", hdev->name);
2118 
2119 	rcu_read_lock();
2120 
2121 	list_for_each_entry_rcu(conn, &h->list, list) {
2122 		struct hci_chan *tmp;
2123 
2124 		if (conn->type != type)
2125 			continue;
2126 
2127 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2128 			continue;
2129 
2130 		conn_num++;
2131 
2132 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2133 			struct sk_buff *skb;
2134 
2135 			if (skb_queue_empty(&tmp->data_q))
2136 				continue;
2137 
2138 			skb = skb_peek(&tmp->data_q);
2139 			if (skb->priority < cur_prio)
2140 				continue;
2141 
2142 			if (skb->priority > cur_prio) {
2143 				num = 0;
2144 				min = ~0;
2145 				cur_prio = skb->priority;
2146 			}
2147 
2148 			num++;
2149 
2150 			if (conn->sent < min) {
2151 				min  = conn->sent;
2152 				chan = tmp;
2153 			}
2154 		}
2155 
2156 		if (hci_conn_num(hdev, type) == conn_num)
2157 			break;
2158 	}
2159 
2160 	rcu_read_unlock();
2161 
2162 	if (!chan)
2163 		return NULL;
2164 
2165 	switch (chan->conn->type) {
2166 	case ACL_LINK:
2167 		cnt = hdev->acl_cnt;
2168 		break;
2169 	case SCO_LINK:
2170 	case ESCO_LINK:
2171 		cnt = hdev->sco_cnt;
2172 		break;
2173 	case LE_LINK:
2174 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2175 		break;
2176 	default:
2177 		cnt = 0;
2178 		BT_ERR("Unknown link type");
2179 	}
2180 
2181 	q = cnt / num;
2182 	*quote = q ? q : 1;
2183 	BT_DBG("chan %p quote %d", chan, *quote);
2184 	return chan;
2185 }
2186 
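/* Promote the priority of queued packets on channels that sent nothing in
 * the last scheduling round, so starved channels eventually get serviced. */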
2187 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2188 {
2189 	struct hci_conn_hash *h = &hdev->conn_hash;
2190 	struct hci_conn *conn;
2191 	int num = 0;
2192 
2193 	BT_DBG("%s", hdev->name);
2194 
2195 	rcu_read_lock();
2196 
2197 	list_for_each_entry_rcu(conn, &h->list, list) {
2198 		struct hci_chan *chan;
2199 
2200 		if (conn->type != type)
2201 			continue;
2202 
2203 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2204 			continue;
2205 
2206 		num++;
2207 
2208 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2209 			struct sk_buff *skb;
2210 
2211 			if (chan->sent) {
2212 				chan->sent = 0;
2213 				continue;
2214 			}
2215 
2216 			if (skb_queue_empty(&chan->data_q))
2217 				continue;
2218 
2219 			skb = skb_peek(&chan->data_q);
2220 			if (skb->priority >= HCI_PRIO_MAX - 1)
2221 				continue;
2222 
2223 			skb->priority = HCI_PRIO_MAX - 1;
2224 
2225 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2226 								skb->priority);
2227 		}
2228 
2229 		if (hci_conn_num(hdev, type) == num)
2230 			break;
2231 	}
2232 
2233 	rcu_read_unlock();
2234 
2235 }
2236 
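/* hci_sched_acl - drain queued ACL data, one priority round at a time.
 *
 * If no ACL credits have come back for ~45 seconds (longer than the
 * maximum link supervision timeout) the stalled links are torn down
 * via hci_link_tx_to().  Otherwise each channel picked by
 * hci_chan_sent() may send up to its quote of frames, stopping early
 * if a lower-priority skb reaches the head of its queue.  If anything
 * was sent, hci_prio_recalculate() ages the remaining queues.
 */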
2237 static inline void hci_sched_acl(struct hci_dev *hdev)
2238 {
2239 	struct hci_chan *chan;
2240 	struct sk_buff *skb;
2241 	int quote;
2242 	unsigned int cnt;
2243 
2244 	BT_DBG("%s", hdev->name);
2245 
2246 	if (!hci_conn_num(hdev, ACL_LINK))
2247 		return;
2248 
2249 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2250 		/* ACL tx timeout must be longer than maximum
2251 		 * link supervision timeout (40.9 seconds) */
2252 		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2253 			hci_link_tx_to(hdev, ACL_LINK);
2254 	}
2255 
2256 	cnt = hdev->acl_cnt;
2257 
2258 	while (hdev->acl_cnt &&
2259 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2260 		u32 priority = (skb_peek(&chan->data_q))->priority;
2261 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2262 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2263 					skb->len, skb->priority);
2264 
2265 			/* Stop if priority has changed */
2266 			if (skb->priority < priority)
2267 				break;
2268 
2269 			skb = skb_dequeue(&chan->data_q);
2270 
2271 			hci_conn_enter_active_mode(chan->conn,
2272 						bt_cb(skb)->force_active);
2273 
2274 			hci_send_frame(skb);
2275 			hdev->acl_last_tx = jiffies;
2276 
2277 			hdev->acl_cnt--;
2278 			chan->sent++;
2279 			chan->conn->sent++;
2280 		}
2281 	}
2282 
2283 	if (cnt != hdev->acl_cnt)
2284 		hci_prio_recalculate(hdev, ACL_LINK);
2285 }
2286 
2287 /* Schedule SCO */
2288 static inline void hci_sched_sco(struct hci_dev *hdev)
2289 {
2290 	struct hci_conn *conn;
2291 	struct sk_buff *skb;
2292 	int quote;
2293 
2294 	BT_DBG("%s", hdev->name);
2295 
2296 	if (!hci_conn_num(hdev, SCO_LINK))
2297 		return;
2298 
2299 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2300 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2301 			BT_DBG("skb %p len %d", skb, skb->len);
2302 			hci_send_frame(skb);
2303 
2304 			conn->sent++;
2305 			if (conn->sent == ~0)
2306 				conn->sent = 0;
2307 		}
2308 	}
2309 }
2310 
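/* Schedule eSCO.  Same round-robin as SCO above; both link types are
 * accounted against the shared SCO buffer count (hdev->sco_cnt). */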
2311 static inline void hci_sched_esco(struct hci_dev *hdev)
2312 {
2313 	struct hci_conn *conn;
2314 	struct sk_buff *skb;
2315 	int quote;
2316 
2317 	BT_DBG("%s", hdev->name);
2318 
2319 	if (!hci_conn_num(hdev, ESCO_LINK))
2320 		return;
2321 
2322 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2323 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2324 			BT_DBG("skb %p len %d", skb, skb->len);
2325 			hci_send_frame(skb);
2326 
2327 			conn->sent++;
2328 			if (conn->sent == ~0)
2329 				conn->sent = 0;
2330 		}
2331 	}
2332 }
2333 
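/* Schedule LE data.  Works like hci_sched_acl(), except that when the
 * controller reports no dedicated LE buffers (le_pkts == 0) LE traffic
 * is charged against the ACL buffer count instead. */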
2334 static inline void hci_sched_le(struct hci_dev *hdev)
2335 {
2336 	struct hci_chan *chan;
2337 	struct sk_buff *skb;
2338 	int quote, cnt, tmp;
2339 
2340 	BT_DBG("%s", hdev->name);
2341 
2342 	if (!hci_conn_num(hdev, LE_LINK))
2343 		return;
2344 
2345 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2346 		/* LE tx timeout must be longer than maximum
2347 		 * link supervision timeout (40.9 seconds) */
2348 		if (!hdev->le_cnt && hdev->le_pkts &&
2349 				time_after(jiffies, hdev->le_last_tx + HZ * 45))
2350 			hci_link_tx_to(hdev, LE_LINK);
2351 	}
2352 
2353 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2354 	tmp = cnt;
2355 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2356 		u32 priority = (skb_peek(&chan->data_q))->priority;
2357 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2358 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2359 					skb->len, skb->priority);
2360 
2361 			/* Stop if priority has changed */
2362 			if (skb->priority < priority)
2363 				break;
2364 
2365 			skb = skb_dequeue(&chan->data_q);
2366 
2367 			hci_send_frame(skb);
2368 			hdev->le_last_tx = jiffies;
2369 
2370 			cnt--;
2371 			chan->sent++;
2372 			chan->conn->sent++;
2373 		}
2374 	}
2375 
2376 	if (hdev->le_pkts)
2377 		hdev->le_cnt = cnt;
2378 	else
2379 		hdev->acl_cnt = cnt;
2380 
2381 	if (cnt != tmp)
2382 		hci_prio_recalculate(hdev, LE_LINK);
2383 }
2384 
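/* hci_tx_work - transmit path work item.
 *
 * Services the ACL, SCO, eSCO and LE schedulers in turn, then flushes
 * any raw packets queued on hdev->raw_q straight to the driver.
 */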
2385 static void hci_tx_work(struct work_struct *work)
2386 {
2387 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2388 	struct sk_buff *skb;
2389 
2390 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2391 		hdev->sco_cnt, hdev->le_cnt);
2392 
2393 	/* Schedule queues and send pending frames to the HCI driver */
2394 
2395 	hci_sched_acl(hdev);
2396 
2397 	hci_sched_sco(hdev);
2398 
2399 	hci_sched_esco(hdev);
2400 
2401 	hci_sched_le(hdev);
2402 
2403 	/* Send next queued raw (unknown type) packet */
2404 	while ((skb = skb_dequeue(&hdev->raw_q)))
2405 		hci_send_frame(skb);
2406 }
2407 
2408 /* ----- HCI RX task (incoming data processing) ----- */
2409 
2410 /* ACL data packet */
2411 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2412 {
2413 	struct hci_acl_hdr *hdr = (void *) skb->data;
2414 	struct hci_conn *conn;
2415 	__u16 handle, flags;
2416 
2417 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2418 
2419 	handle = __le16_to_cpu(hdr->handle);
2420 	flags  = hci_flags(handle);
2421 	handle = hci_handle(handle);
2422 
2423 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2424 
2425 	hdev->stat.acl_rx++;
2426 
2427 	hci_dev_lock(hdev);
2428 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2429 	hci_dev_unlock(hdev);
2430 
2431 	if (conn) {
2432 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2433 
2434 		/* Send to upper protocol */
2435 		l2cap_recv_acldata(conn, skb, flags);
2436 		return;
2437 	} else {
2438 		BT_ERR("%s ACL packet for unknown connection handle %d",
2439 			hdev->name, handle);
2440 	}
2441 
2442 	kfree_skb(skb);
2443 }
2444 
2445 /* SCO data packet */
2446 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2447 {
2448 	struct hci_sco_hdr *hdr = (void *) skb->data;
2449 	struct hci_conn *conn;
2450 	__u16 handle;
2451 
2452 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2453 
2454 	handle = __le16_to_cpu(hdr->handle);
2455 
2456 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2457 
2458 	hdev->stat.sco_rx++;
2459 
2460 	hci_dev_lock(hdev);
2461 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2462 	hci_dev_unlock(hdev);
2463 
2464 	if (conn) {
2465 		/* Send to upper protocol */
2466 		sco_recv_scodata(conn, skb);
2467 		return;
2468 	} else {
2469 		BT_ERR("%s SCO packet for unknown connection handle %d",
2470 			hdev->name, handle);
2471 	}
2472 
2473 	kfree_skb(skb);
2474 }
2475 
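/* hci_rx_work - receive path work item.
 *
 * Drains hdev->rx_q.  Each frame is first copied to listening HCI
 * sockets when the device is in promiscuous mode.  In raw mode that is
 * all that happens; otherwise the frame is dispatched by packet type
 * to the event, ACL data or SCO data handler (data packets are dropped
 * while the device is still initializing).
 */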
2476 static void hci_rx_work(struct work_struct *work)
2477 {
2478 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2479 	struct sk_buff *skb;
2480 
2481 	BT_DBG("%s", hdev->name);
2482 
2483 	while ((skb = skb_dequeue(&hdev->rx_q))) {
2484 		if (atomic_read(&hdev->promisc)) {
2485 			/* Send copy to the sockets */
2486 			hci_send_to_sock(hdev, skb, NULL);
2487 		}
2488 
2489 		if (test_bit(HCI_RAW, &hdev->flags)) {
2490 			kfree_skb(skb);
2491 			continue;
2492 		}
2493 
2494 		if (test_bit(HCI_INIT, &hdev->flags)) {
2495 			/* Don't process data packets in this state. */
2496 			switch (bt_cb(skb)->pkt_type) {
2497 			case HCI_ACLDATA_PKT:
2498 			case HCI_SCODATA_PKT:
2499 				kfree_skb(skb);
2500 				continue;
2501 			}
2502 		}
2503 
2504 		/* Process frame */
2505 		switch (bt_cb(skb)->pkt_type) {
2506 		case HCI_EVENT_PKT:
2507 			BT_DBG("%s Event packet", hdev->name);
2508 			hci_event_packet(hdev, skb);
2509 			break;
2510 
2511 		case HCI_ACLDATA_PKT:
2512 			BT_DBG("%s ACL data packet", hdev->name);
2513 			hci_acldata_packet(hdev, skb);
2514 			break;
2515 
2516 		case HCI_SCODATA_PKT:
2517 			BT_DBG("%s SCO data packet", hdev->name);
2518 			hci_scodata_packet(hdev, skb);
2519 			break;
2520 
2521 		default:
2522 			kfree_skb(skb);
2523 			break;
2524 		}
2525 	}
2526 }
2527 
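/* hci_cmd_work - send the next queued HCI command.
 *
 * The controller only accepts a limited number of outstanding commands
 * (tracked in hdev->cmd_cnt).  When a credit is available, the command
 * at the head of hdev->cmd_q is cloned into hdev->sent_cmd (so the
 * reply can later be matched against it) and sent, and the command
 * timeout timer is rearmed (or deleted while a reset is pending).  If
 * the clone fails, the command is requeued and the work rescheduled.
 */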
2528 static void hci_cmd_work(struct work_struct *work)
2529 {
2530 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2531 	struct sk_buff *skb;
2532 
2533 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2534 
2535 	/* Send queued commands */
2536 	if (atomic_read(&hdev->cmd_cnt)) {
2537 		skb = skb_dequeue(&hdev->cmd_q);
2538 		if (!skb)
2539 			return;
2540 
2541 		kfree_skb(hdev->sent_cmd);
2542 
2543 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2544 		if (hdev->sent_cmd) {
2545 			atomic_dec(&hdev->cmd_cnt);
2546 			hci_send_frame(skb);
2547 			if (test_bit(HCI_RESET, &hdev->flags))
2548 				del_timer(&hdev->cmd_timer);
2549 			else
2550 				mod_timer(&hdev->cmd_timer,
2551 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2552 		} else {
2553 			skb_queue_head(&hdev->cmd_q, skb);
2554 			queue_work(hdev->workqueue, &hdev->cmd_work);
2555 		}
2556 	}
2557 }
2558 
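/* hci_do_inquiry - start an inquiry using the general inquiry access
 * code (GIAC).  @length is the inquiry length in units of 1.28
 * seconds.  Returns -EINPROGRESS if an inquiry is already running.
 */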
2559 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2560 {
2561 	/* General inquiry access code (GIAC) */
2562 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2563 	struct hci_cp_inquiry cp;
2564 
2565 	BT_DBG("%s", hdev->name);
2566 
2567 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2568 		return -EINPROGRESS;
2569 
2570 	memset(&cp, 0, sizeof(cp));
2571 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2572 	cp.length  = length;
2573 
2574 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2575 }
2576 
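/* hci_cancel_inquiry - abort a running inquiry.  Returns -EPERM when
 * no inquiry is in progress. */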
2577 int hci_cancel_inquiry(struct hci_dev *hdev)
2578 {
2579 	BT_DBG("%s", hdev->name);
2580 
2581 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2582 		return -EPERM;
2583 
2584 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2585 }
2586 
2587 module_param(enable_hs, bool, 0644);
2588 MODULE_PARM_DESC(enable_hs, "Enable High Speed");
2589