xref: /openbmc/linux/net/bluetooth/hci_core.c (revision ab73b751)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31 
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47 
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50 
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 
54 #define AUTO_OFF_TIMEOUT 2000
55 
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
59 
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63 
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67 
68 /* ---- HCI notifications ---- */
69 
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 	hci_sock_dev_event(hdev, event);
73 }
74 
75 /* ---- HCI requests ---- */
76 
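/* Called by the event processing code when a command completes. During
 * the init phase this also handles controllers that emit a spontaneous
 * Reset Complete event by re-queuing the last sent command; otherwise,
 * for a pending synchronous request, it stores the result and wakes up
 * the waiter sleeping in __hci_request().
 */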
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78 {
79 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80 
81 	/* If this is the init phase, check whether the completed command matches
82 	 * the last init command, and if not just return.
83 	 */
84 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 		u16 opcode = __le16_to_cpu(sent->opcode);
87 		struct sk_buff *skb;
88 
89 		/* Some CSR-based controllers generate a spontaneous
90 		 * reset complete event during init and any pending
91 		 * command will never be completed. In such a case we
92 		 * need to resend whatever was the last sent
93 		 * command.
94 		 */
95 
96 		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
97 			return;
98 
99 		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 		if (skb) {
101 			skb_queue_head(&hdev->cmd_q, skb);
102 			queue_work(hdev->workqueue, &hdev->cmd_work);
103 		}
104 
105 		return;
106 	}
107 
108 	if (hdev->req_status == HCI_REQ_PEND) {
109 		hdev->req_result = result;
110 		hdev->req_status = HCI_REQ_DONE;
111 		wake_up_interruptible(&hdev->req_wait_q);
112 	}
113 }
114 
115 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 {
117 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
118 
119 	if (hdev->req_status == HCI_REQ_PEND) {
120 		hdev->req_result = err;
121 		hdev->req_status = HCI_REQ_CANCELED;
122 		wake_up_interruptible(&hdev->req_wait_q);
123 	}
124 }
125 
126 /* Execute request and wait for completion. */
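/* The caller-supplied req() callback queues one or more HCI commands;
 * this function then sleeps on req_wait_q until hci_req_complete() or
 * hci_req_cancel() runs, or the timeout expires, and translates
 * req_status/req_result into an errno. Callers serialize requests via
 * hci_req_lock() (see hci_request() below).
 */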
127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
128 					unsigned long opt, __u32 timeout)
129 {
130 	DECLARE_WAITQUEUE(wait, current);
131 	int err = 0;
132 
133 	BT_DBG("%s start", hdev->name);
134 
135 	hdev->req_status = HCI_REQ_PEND;
136 
137 	add_wait_queue(&hdev->req_wait_q, &wait);
138 	set_current_state(TASK_INTERRUPTIBLE);
139 
140 	req(hdev, opt);
141 	schedule_timeout(timeout);
142 
143 	remove_wait_queue(&hdev->req_wait_q, &wait);
144 
145 	if (signal_pending(current))
146 		return -EINTR;
147 
148 	switch (hdev->req_status) {
149 	case HCI_REQ_DONE:
150 		err = -bt_to_errno(hdev->req_result);
151 		break;
152 
153 	case HCI_REQ_CANCELED:
154 		err = -hdev->req_result;
155 		break;
156 
157 	default:
158 		err = -ETIMEDOUT;
159 		break;
160 	}
161 
162 	hdev->req_status = hdev->req_result = 0;
163 
164 	BT_DBG("%s end: err %d", hdev->name, err);
165 
166 	return err;
167 }
168 
169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
170 					unsigned long opt, __u32 timeout)
171 {
172 	int ret;
173 
174 	if (!test_bit(HCI_UP, &hdev->flags))
175 		return -ENETDOWN;
176 
177 	/* Serialize all requests */
178 	hci_req_lock(hdev);
179 	ret = __hci_request(hdev, req, opt, timeout);
180 	hci_req_unlock(hdev);
181 
182 	return ret;
183 }
184 
185 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 {
187 	BT_DBG("%s %ld", hdev->name, opt);
188 
189 	/* Reset device */
190 	set_bit(HCI_RESET, &hdev->flags);
191 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
192 }
193 
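/* Queue the HCI commands used to bring up a BR/EDR controller: a reset
 * (unless quirked out), reads of the local features, version, buffer
 * size, address, class, name and voice setting, followed by the
 * optional steps of clearing event filters, setting the connection
 * accept timeout and deleting stored link keys.
 */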
194 static void bredr_init(struct hci_dev *hdev)
195 {
196 	struct hci_cp_delete_stored_link_key cp;
197 	__le16 param;
198 	__u8 flt_type;
199 
200 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 
202 	/* Mandatory initialization */
203 
204 	/* Reset */
205 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206 		set_bit(HCI_RESET, &hdev->flags);
207 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 	}
209 
210 	/* Read Local Supported Features */
211 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 
213 	/* Read Local Version */
214 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 
216 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
217 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218 
219 	/* Read BD Address */
220 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221 
222 	/* Read Class of Device */
223 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224 
225 	/* Read Local Name */
226 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
227 
228 	/* Read Voice Setting */
229 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
230 
231 	/* Optional initialization */
232 
233 	/* Clear Event Filters */
234 	flt_type = HCI_FLT_CLEAR_ALL;
235 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 
237 	/* Connection accept timeout ~20 secs */
238 	param = cpu_to_le16(0x7d00);
239 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 
241 	bacpy(&cp.bdaddr, BDADDR_ANY);
242 	cp.delete_all = 1;
243 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
244 }
245 
246 static void amp_init(struct hci_dev *hdev)
247 {
248 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249 
250 	/* Reset */
251 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252 
253 	/* Read Local Version */
254 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255 
256 	/* Read Local AMP Info */
257 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
258 }
259 
260 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
261 {
262 	struct sk_buff *skb;
263 
264 	BT_DBG("%s %ld", hdev->name, opt);
265 
266 	/* Driver initialization */
267 
268 	/* Special commands */
269 	while ((skb = skb_dequeue(&hdev->driver_init))) {
270 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
271 		skb->dev = (void *) hdev;
272 
273 		skb_queue_tail(&hdev->cmd_q, skb);
274 		queue_work(hdev->workqueue, &hdev->cmd_work);
275 	}
276 	skb_queue_purge(&hdev->driver_init);
277 
278 	switch (hdev->dev_type) {
279 	case HCI_BREDR:
280 		bredr_init(hdev);
281 		break;
282 
283 	case HCI_AMP:
284 		amp_init(hdev);
285 		break;
286 
287 	default:
288 		BT_ERR("Unknown device type %d", hdev->dev_type);
289 		break;
290 	}
291 
292 }
293 
294 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
295 {
296 	BT_DBG("%s", hdev->name);
297 
298 	/* Read LE buffer size */
299 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
300 }
301 
302 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
303 {
304 	__u8 scan = opt;
305 
306 	BT_DBG("%s %x", hdev->name, scan);
307 
308 	/* Inquiry and Page scans */
309 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
310 }
311 
312 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
313 {
314 	__u8 auth = opt;
315 
316 	BT_DBG("%s %x", hdev->name, auth);
317 
318 	/* Authentication */
319 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
320 }
321 
322 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
323 {
324 	__u8 encrypt = opt;
325 
326 	BT_DBG("%s %x", hdev->name, encrypt);
327 
328 	/* Encryption */
329 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
330 }
331 
332 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
333 {
334 	__le16 policy = cpu_to_le16(opt);
335 
336 	BT_DBG("%s %x", hdev->name, policy);
337 
338 	/* Default link policy */
339 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
340 }
341 
342 /* Get HCI device by index.
343  * Device is held on return. */
344 struct hci_dev *hci_dev_get(int index)
345 {
346 	struct hci_dev *hdev = NULL, *d;
347 
348 	BT_DBG("%d", index);
349 
350 	if (index < 0)
351 		return NULL;
352 
353 	read_lock(&hci_dev_list_lock);
354 	list_for_each_entry(d, &hci_dev_list, list) {
355 		if (d->id == index) {
356 			hdev = hci_dev_hold(d);
357 			break;
358 		}
359 	}
360 	read_unlock(&hci_dev_list_lock);
361 	return hdev;
362 }
363 
364 /* ---- Inquiry support ---- */
365 
366 bool hci_discovery_active(struct hci_dev *hdev)
367 {
368 	struct discovery_state *discov = &hdev->discovery;
369 
370 	switch (discov->state) {
371 	case DISCOVERY_FINDING:
372 	case DISCOVERY_RESOLVING:
373 		return true;
374 
375 	default:
376 		return false;
377 	}
378 }
379 
380 void hci_discovery_set_state(struct hci_dev *hdev, int state)
381 {
382 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
383 
384 	if (hdev->discovery.state == state)
385 		return;
386 
387 	switch (state) {
388 	case DISCOVERY_STOPPED:
389 		if (hdev->discovery.state != DISCOVERY_STARTING)
390 			mgmt_discovering(hdev, 0);
391 		break;
392 	case DISCOVERY_STARTING:
393 		break;
394 	case DISCOVERY_FINDING:
395 		mgmt_discovering(hdev, 1);
396 		break;
397 	case DISCOVERY_RESOLVING:
398 		break;
399 	case DISCOVERY_STOPPING:
400 		break;
401 	}
402 
403 	hdev->discovery.state = state;
404 }
405 
406 static void inquiry_cache_flush(struct hci_dev *hdev)
407 {
408 	struct discovery_state *cache = &hdev->discovery;
409 	struct inquiry_entry *p, *n;
410 
411 	list_for_each_entry_safe(p, n, &cache->all, all) {
412 		list_del(&p->all);
413 		kfree(p);
414 	}
415 
416 	INIT_LIST_HEAD(&cache->unknown);
417 	INIT_LIST_HEAD(&cache->resolve);
418 }
419 
420 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
421 {
422 	struct discovery_state *cache = &hdev->discovery;
423 	struct inquiry_entry *e;
424 
425 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426 
427 	list_for_each_entry(e, &cache->all, all) {
428 		if (!bacmp(&e->data.bdaddr, bdaddr))
429 			return e;
430 	}
431 
432 	return NULL;
433 }
434 
435 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
436 						       bdaddr_t *bdaddr)
437 {
438 	struct discovery_state *cache = &hdev->discovery;
439 	struct inquiry_entry *e;
440 
441 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
442 
443 	list_for_each_entry(e, &cache->unknown, list) {
444 		if (!bacmp(&e->data.bdaddr, bdaddr))
445 			return e;
446 	}
447 
448 	return NULL;
449 }
450 
451 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
452 						       bdaddr_t *bdaddr,
453 						       int state)
454 {
455 	struct discovery_state *cache = &hdev->discovery;
456 	struct inquiry_entry *e;
457 
458 	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459 
460 	list_for_each_entry(e, &cache->resolve, list) {
461 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 			return e;
463 		if (!bacmp(&e->data.bdaddr, bdaddr))
464 			return e;
465 	}
466 
467 	return NULL;
468 }
469 
470 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
471 				      struct inquiry_entry *ie)
472 {
473 	struct discovery_state *cache = &hdev->discovery;
474 	struct list_head *pos = &cache->resolve;
475 	struct inquiry_entry *p;
476 
477 	list_del(&ie->list);
478 
479 	list_for_each_entry(p, &cache->resolve, list) {
480 		if (p->name_state != NAME_PENDING &&
481 				abs(p->data.rssi) >= abs(ie->data.rssi))
482 			break;
483 		pos = &p->list;
484 	}
485 
486 	list_add(&ie->list, pos);
487 }
488 
489 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
490 			      bool name_known, bool *ssp)
491 {
492 	struct discovery_state *cache = &hdev->discovery;
493 	struct inquiry_entry *ie;
494 
495 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
496 
497 	if (ssp)
498 		*ssp = data->ssp_mode;
499 
500 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
501 	if (ie) {
502 		if (ie->data.ssp_mode && ssp)
503 			*ssp = true;
504 
505 		if (ie->name_state == NAME_NEEDED &&
506 						data->rssi != ie->data.rssi) {
507 			ie->data.rssi = data->rssi;
508 			hci_inquiry_cache_update_resolve(hdev, ie);
509 		}
510 
511 		goto update;
512 	}
513 
514 	/* Entry not in the cache. Add new one. */
515 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
516 	if (!ie)
517 		return false;
518 
519 	list_add(&ie->all, &cache->all);
520 
521 	if (name_known) {
522 		ie->name_state = NAME_KNOWN;
523 	} else {
524 		ie->name_state = NAME_NOT_KNOWN;
525 		list_add(&ie->list, &cache->unknown);
526 	}
527 
528 update:
529 	if (name_known && ie->name_state != NAME_KNOWN &&
530 					ie->name_state != NAME_PENDING) {
531 		ie->name_state = NAME_KNOWN;
532 		list_del(&ie->list);
533 	}
534 
535 	memcpy(&ie->data, data, sizeof(*data));
536 	ie->timestamp = jiffies;
537 	cache->timestamp = jiffies;
538 
539 	if (ie->name_state == NAME_NOT_KNOWN)
540 		return false;
541 
542 	return true;
543 }
544 
545 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
546 {
547 	struct discovery_state *cache = &hdev->discovery;
548 	struct inquiry_info *info = (struct inquiry_info *) buf;
549 	struct inquiry_entry *e;
550 	int copied = 0;
551 
552 	list_for_each_entry(e, &cache->all, all) {
553 		struct inquiry_data *data = &e->data;
554 
555 		if (copied >= num)
556 			break;
557 
558 		bacpy(&info->bdaddr, &data->bdaddr);
559 		info->pscan_rep_mode	= data->pscan_rep_mode;
560 		info->pscan_period_mode	= data->pscan_period_mode;
561 		info->pscan_mode	= data->pscan_mode;
562 		memcpy(info->dev_class, data->dev_class, 3);
563 		info->clock_offset	= data->clock_offset;
564 
565 		info++;
566 		copied++;
567 	}
568 
569 	BT_DBG("cache %p, copied %d", cache, copied);
570 	return copied;
571 }
572 
573 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
574 {
575 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
576 	struct hci_cp_inquiry cp;
577 
578 	BT_DBG("%s", hdev->name);
579 
580 	if (test_bit(HCI_INQUIRY, &hdev->flags))
581 		return;
582 
583 	/* Start Inquiry */
584 	memcpy(&cp.lap, &ir->lap, 3);
585 	cp.length  = ir->length;
586 	cp.num_rsp = ir->num_rsp;
587 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
588 }
589 
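/* Handle the HCIINQUIRY ioctl: flush the inquiry cache and run a new
 * inquiry if the cached results are too old, the cache is empty or a
 * flush was requested, then copy up to max_rsp cached entries back to
 * userspace.
 */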
590 int hci_inquiry(void __user *arg)
591 {
592 	__u8 __user *ptr = arg;
593 	struct hci_inquiry_req ir;
594 	struct hci_dev *hdev;
595 	int err = 0, do_inquiry = 0, max_rsp;
596 	long timeo;
597 	__u8 *buf;
598 
599 	if (copy_from_user(&ir, ptr, sizeof(ir)))
600 		return -EFAULT;
601 
602 	hdev = hci_dev_get(ir.dev_id);
603 	if (!hdev)
604 		return -ENODEV;
605 
606 	hci_dev_lock(hdev);
607 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 				inquiry_cache_empty(hdev) ||
609 				ir.flags & IREQ_CACHE_FLUSH) {
610 		inquiry_cache_flush(hdev);
611 		do_inquiry = 1;
612 	}
613 	hci_dev_unlock(hdev);
614 
615 	timeo = ir.length * msecs_to_jiffies(2000);
616 
617 	if (do_inquiry) {
618 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
619 		if (err < 0)
620 			goto done;
621 	}
622 
623 	/* For an unlimited number of responses use a buffer with 255 entries */
624 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 
626 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and
627 	 * then copy it to user space.
628 	 */
629 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
630 	if (!buf) {
631 		err = -ENOMEM;
632 		goto done;
633 	}
634 
635 	hci_dev_lock(hdev);
636 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
637 	hci_dev_unlock(hdev);
638 
639 	BT_DBG("num_rsp %d", ir.num_rsp);
640 
641 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 		ptr += sizeof(ir);
643 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 					ir.num_rsp))
645 			err = -EFAULT;
646 	} else
647 		err = -EFAULT;
648 
649 	kfree(buf);
650 
651 done:
652 	hci_dev_put(hdev);
653 	return err;
654 }
655 
656 /* ---- HCI ioctl helpers ---- */
657 
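/* Bring an HCI device up: call the driver's open callback, run the HCI
 * init sequence (plus the LE init sequence if the host supports LE)
 * unless the device is in raw mode, and announce HCI_DEV_UP. If init
 * fails, all queues and works are flushed and the driver is closed
 * again.
 */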
658 int hci_dev_open(__u16 dev)
659 {
660 	struct hci_dev *hdev;
661 	int ret = 0;
662 
663 	hdev = hci_dev_get(dev);
664 	if (!hdev)
665 		return -ENODEV;
666 
667 	BT_DBG("%s %p", hdev->name, hdev);
668 
669 	hci_req_lock(hdev);
670 
671 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
672 		ret = -ENODEV;
673 		goto done;
674 	}
675 
676 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
677 		ret = -ERFKILL;
678 		goto done;
679 	}
680 
681 	if (test_bit(HCI_UP, &hdev->flags)) {
682 		ret = -EALREADY;
683 		goto done;
684 	}
685 
686 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
687 		set_bit(HCI_RAW, &hdev->flags);
688 
689 	/* Treat all non-BR/EDR controllers as raw devices if
690 	 * enable_hs is not set */
691 	if (hdev->dev_type != HCI_BREDR && !enable_hs)
692 		set_bit(HCI_RAW, &hdev->flags);
693 
694 	if (hdev->open(hdev)) {
695 		ret = -EIO;
696 		goto done;
697 	}
698 
699 	if (!test_bit(HCI_RAW, &hdev->flags)) {
700 		atomic_set(&hdev->cmd_cnt, 1);
701 		set_bit(HCI_INIT, &hdev->flags);
702 		hdev->init_last_cmd = 0;
703 
704 		ret = __hci_request(hdev, hci_init_req, 0,
705 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 
707 		if (lmp_host_le_capable(hdev))
708 			ret = __hci_request(hdev, hci_le_init_req, 0,
709 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 
711 		clear_bit(HCI_INIT, &hdev->flags);
712 	}
713 
714 	if (!ret) {
715 		hci_dev_hold(hdev);
716 		set_bit(HCI_UP, &hdev->flags);
717 		hci_notify(hdev, HCI_DEV_UP);
718 		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
719 			hci_dev_lock(hdev);
720 			mgmt_powered(hdev, 1);
721 			hci_dev_unlock(hdev);
722 		}
723 	} else {
724 		/* Init failed, cleanup */
725 		flush_work(&hdev->tx_work);
726 		flush_work(&hdev->cmd_work);
727 		flush_work(&hdev->rx_work);
728 
729 		skb_queue_purge(&hdev->cmd_q);
730 		skb_queue_purge(&hdev->rx_q);
731 
732 		if (hdev->flush)
733 			hdev->flush(hdev);
734 
735 		if (hdev->sent_cmd) {
736 			kfree_skb(hdev->sent_cmd);
737 			hdev->sent_cmd = NULL;
738 		}
739 
740 		hdev->close(hdev);
741 		hdev->flags = 0;
742 	}
743 
744 done:
745 	hci_req_unlock(hdev);
746 	hci_dev_put(hdev);
747 	return ret;
748 }
749 
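/* Common shutdown path used by hci_dev_close(), hci_power_off(), rfkill
 * blocking and device unregistration: cancel pending works and timers,
 * flush the queues and the connection hash, optionally issue a final
 * HCI reset, and call the driver's close callback.
 */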
750 static int hci_dev_do_close(struct hci_dev *hdev)
751 {
752 	BT_DBG("%s %p", hdev->name, hdev);
753 
754 	cancel_work_sync(&hdev->le_scan);
755 
756 	hci_req_cancel(hdev, ENODEV);
757 	hci_req_lock(hdev);
758 
759 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
760 		del_timer_sync(&hdev->cmd_timer);
761 		hci_req_unlock(hdev);
762 		return 0;
763 	}
764 
765 	/* Flush RX and TX works */
766 	flush_work(&hdev->tx_work);
767 	flush_work(&hdev->rx_work);
768 
769 	if (hdev->discov_timeout > 0) {
770 		cancel_delayed_work(&hdev->discov_off);
771 		hdev->discov_timeout = 0;
772 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
773 	}
774 
775 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
776 		cancel_delayed_work(&hdev->service_cache);
777 
778 	cancel_delayed_work_sync(&hdev->le_scan_disable);
779 
780 	hci_dev_lock(hdev);
781 	inquiry_cache_flush(hdev);
782 	hci_conn_hash_flush(hdev);
783 	hci_dev_unlock(hdev);
784 
785 	hci_notify(hdev, HCI_DEV_DOWN);
786 
787 	if (hdev->flush)
788 		hdev->flush(hdev);
789 
790 	/* Reset device */
791 	skb_queue_purge(&hdev->cmd_q);
792 	atomic_set(&hdev->cmd_cnt, 1);
793 	if (!test_bit(HCI_RAW, &hdev->flags) &&
794 				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
795 		set_bit(HCI_INIT, &hdev->flags);
796 		__hci_request(hdev, hci_reset_req, 0,
797 					msecs_to_jiffies(250));
798 		clear_bit(HCI_INIT, &hdev->flags);
799 	}
800 
801 	/* Flush cmd work */
802 	flush_work(&hdev->cmd_work);
803 
804 	/* Drop queues */
805 	skb_queue_purge(&hdev->rx_q);
806 	skb_queue_purge(&hdev->cmd_q);
807 	skb_queue_purge(&hdev->raw_q);
808 
809 	/* Drop last sent command */
810 	if (hdev->sent_cmd) {
811 		del_timer_sync(&hdev->cmd_timer);
812 		kfree_skb(hdev->sent_cmd);
813 		hdev->sent_cmd = NULL;
814 	}
815 
816 	/* After this point our queues are empty
817 	 * and no tasks are scheduled. */
818 	hdev->close(hdev);
819 
820 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 		hci_dev_lock(hdev);
822 		mgmt_powered(hdev, 0);
823 		hci_dev_unlock(hdev);
824 	}
825 
826 	/* Clear flags */
827 	hdev->flags = 0;
828 
829 	memset(hdev->eir, 0, sizeof(hdev->eir));
830 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
831 
832 	hci_req_unlock(hdev);
833 
834 	hci_dev_put(hdev);
835 	return 0;
836 }
837 
838 int hci_dev_close(__u16 dev)
839 {
840 	struct hci_dev *hdev;
841 	int err;
842 
843 	hdev = hci_dev_get(dev);
844 	if (!hdev)
845 		return -ENODEV;
846 
847 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
848 		cancel_delayed_work(&hdev->power_off);
849 
850 	err = hci_dev_do_close(hdev);
851 
852 	hci_dev_put(hdev);
853 	return err;
854 }
855 
856 int hci_dev_reset(__u16 dev)
857 {
858 	struct hci_dev *hdev;
859 	int ret = 0;
860 
861 	hdev = hci_dev_get(dev);
862 	if (!hdev)
863 		return -ENODEV;
864 
865 	hci_req_lock(hdev);
866 
867 	if (!test_bit(HCI_UP, &hdev->flags))
868 		goto done;
869 
870 	/* Drop queues */
871 	skb_queue_purge(&hdev->rx_q);
872 	skb_queue_purge(&hdev->cmd_q);
873 
874 	hci_dev_lock(hdev);
875 	inquiry_cache_flush(hdev);
876 	hci_conn_hash_flush(hdev);
877 	hci_dev_unlock(hdev);
878 
879 	if (hdev->flush)
880 		hdev->flush(hdev);
881 
882 	atomic_set(&hdev->cmd_cnt, 1);
883 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
884 
885 	if (!test_bit(HCI_RAW, &hdev->flags))
886 		ret = __hci_request(hdev, hci_reset_req, 0,
887 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 
889 done:
890 	hci_req_unlock(hdev);
891 	hci_dev_put(hdev);
892 	return ret;
893 }
894 
895 int hci_dev_reset_stat(__u16 dev)
896 {
897 	struct hci_dev *hdev;
898 	int ret = 0;
899 
900 	hdev = hci_dev_get(dev);
901 	if (!hdev)
902 		return -ENODEV;
903 
904 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
905 
906 	hci_dev_put(hdev);
907 
908 	return ret;
909 }
910 
911 int hci_dev_cmd(unsigned int cmd, void __user *arg)
912 {
913 	struct hci_dev *hdev;
914 	struct hci_dev_req dr;
915 	int err = 0;
916 
917 	if (copy_from_user(&dr, arg, sizeof(dr)))
918 		return -EFAULT;
919 
920 	hdev = hci_dev_get(dr.dev_id);
921 	if (!hdev)
922 		return -ENODEV;
923 
924 	switch (cmd) {
925 	case HCISETAUTH:
926 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 		break;
929 
930 	case HCISETENCRYPT:
931 		if (!lmp_encrypt_capable(hdev)) {
932 			err = -EOPNOTSUPP;
933 			break;
934 		}
935 
936 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 			/* Auth must be enabled first */
938 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
940 			if (err)
941 				break;
942 		}
943 
944 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 		break;
947 
948 	case HCISETSCAN:
949 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 		break;
952 
953 	case HCISETLINKPOL:
954 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
956 		break;
957 
958 	case HCISETLINKMODE:
959 		hdev->link_mode = ((__u16) dr.dev_opt) &
960 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
961 		break;
962 
963 	case HCISETPTYPE:
964 		hdev->pkt_type = (__u16) dr.dev_opt;
965 		break;
966 
967 	case HCISETACLMTU:
968 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
969 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
970 		break;
971 
972 	case HCISETSCOMTU:
973 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
974 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
975 		break;
976 
977 	default:
978 		err = -EINVAL;
979 		break;
980 	}
981 
982 	hci_dev_put(hdev);
983 	return err;
984 }
985 
986 int hci_get_dev_list(void __user *arg)
987 {
988 	struct hci_dev *hdev;
989 	struct hci_dev_list_req *dl;
990 	struct hci_dev_req *dr;
991 	int n = 0, size, err;
992 	__u16 dev_num;
993 
994 	if (get_user(dev_num, (__u16 __user *) arg))
995 		return -EFAULT;
996 
997 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
998 		return -EINVAL;
999 
1000 	size = sizeof(*dl) + dev_num * sizeof(*dr);
1001 
1002 	dl = kzalloc(size, GFP_KERNEL);
1003 	if (!dl)
1004 		return -ENOMEM;
1005 
1006 	dr = dl->dev_req;
1007 
1008 	read_lock(&hci_dev_list_lock);
1009 	list_for_each_entry(hdev, &hci_dev_list, list) {
1010 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1011 			cancel_delayed_work(&hdev->power_off);
1012 
1013 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1014 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1015 
1016 		(dr + n)->dev_id  = hdev->id;
1017 		(dr + n)->dev_opt = hdev->flags;
1018 
1019 		if (++n >= dev_num)
1020 			break;
1021 	}
1022 	read_unlock(&hci_dev_list_lock);
1023 
1024 	dl->dev_num = n;
1025 	size = sizeof(*dl) + n * sizeof(*dr);
1026 
1027 	err = copy_to_user(arg, dl, size);
1028 	kfree(dl);
1029 
1030 	return err ? -EFAULT : 0;
1031 }
1032 
1033 int hci_get_dev_info(void __user *arg)
1034 {
1035 	struct hci_dev *hdev;
1036 	struct hci_dev_info di;
1037 	int err = 0;
1038 
1039 	if (copy_from_user(&di, arg, sizeof(di)))
1040 		return -EFAULT;
1041 
1042 	hdev = hci_dev_get(di.dev_id);
1043 	if (!hdev)
1044 		return -ENODEV;
1045 
1046 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1047 		cancel_delayed_work_sync(&hdev->power_off);
1048 
1049 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1050 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1051 
1052 	strcpy(di.name, hdev->name);
1053 	di.bdaddr   = hdev->bdaddr;
1054 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1055 	di.flags    = hdev->flags;
1056 	di.pkt_type = hdev->pkt_type;
1057 	di.acl_mtu  = hdev->acl_mtu;
1058 	di.acl_pkts = hdev->acl_pkts;
1059 	di.sco_mtu  = hdev->sco_mtu;
1060 	di.sco_pkts = hdev->sco_pkts;
1061 	di.link_policy = hdev->link_policy;
1062 	di.link_mode   = hdev->link_mode;
1063 
1064 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1065 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1066 
1067 	if (copy_to_user(arg, &di, sizeof(di)))
1068 		err = -EFAULT;
1069 
1070 	hci_dev_put(hdev);
1071 
1072 	return err;
1073 }
1074 
1075 /* ---- Interface to HCI drivers ---- */
1076 
1077 static int hci_rfkill_set_block(void *data, bool blocked)
1078 {
1079 	struct hci_dev *hdev = data;
1080 
1081 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1082 
1083 	if (!blocked)
1084 		return 0;
1085 
1086 	hci_dev_do_close(hdev);
1087 
1088 	return 0;
1089 }
1090 
1091 static const struct rfkill_ops hci_rfkill_ops = {
1092 	.set_block = hci_rfkill_set_block,
1093 };
1094 
1095 static void hci_power_on(struct work_struct *work)
1096 {
1097 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1098 
1099 	BT_DBG("%s", hdev->name);
1100 
1101 	if (hci_dev_open(hdev->id) < 0)
1102 		return;
1103 
1104 	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 		schedule_delayed_work(&hdev->power_off,
1106 					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 
1108 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 		mgmt_index_added(hdev);
1110 }
1111 
1112 static void hci_power_off(struct work_struct *work)
1113 {
1114 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 							power_off.work);
1116 
1117 	BT_DBG("%s", hdev->name);
1118 
1119 	hci_dev_do_close(hdev);
1120 }
1121 
1122 static void hci_discov_off(struct work_struct *work)
1123 {
1124 	struct hci_dev *hdev;
1125 	u8 scan = SCAN_PAGE;
1126 
1127 	hdev = container_of(work, struct hci_dev, discov_off.work);
1128 
1129 	BT_DBG("%s", hdev->name);
1130 
1131 	hci_dev_lock(hdev);
1132 
1133 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1134 
1135 	hdev->discov_timeout = 0;
1136 
1137 	hci_dev_unlock(hdev);
1138 }
1139 
1140 int hci_uuids_clear(struct hci_dev *hdev)
1141 {
1142 	struct list_head *p, *n;
1143 
1144 	list_for_each_safe(p, n, &hdev->uuids) {
1145 		struct bt_uuid *uuid;
1146 
1147 		uuid = list_entry(p, struct bt_uuid, list);
1148 
1149 		list_del(p);
1150 		kfree(uuid);
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 int hci_link_keys_clear(struct hci_dev *hdev)
1157 {
1158 	struct list_head *p, *n;
1159 
1160 	list_for_each_safe(p, n, &hdev->link_keys) {
1161 		struct link_key *key;
1162 
1163 		key = list_entry(p, struct link_key, list);
1164 
1165 		list_del(p);
1166 		kfree(key);
1167 	}
1168 
1169 	return 0;
1170 }
1171 
1172 int hci_smp_ltks_clear(struct hci_dev *hdev)
1173 {
1174 	struct smp_ltk *k, *tmp;
1175 
1176 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 		list_del(&k->list);
1178 		kfree(k);
1179 	}
1180 
1181 	return 0;
1182 }
1183 
1184 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185 {
1186 	struct link_key *k;
1187 
1188 	list_for_each_entry(k, &hdev->link_keys, list)
1189 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1190 			return k;
1191 
1192 	return NULL;
1193 }
1194 
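/* Decide whether a newly created link key should be stored
 * persistently. Legacy keys always are, debug keys never are, and for
 * the remaining types the decision depends on the local and remote
 * authentication requirements of the connection.
 */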
1195 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 						u8 key_type, u8 old_key_type)
1197 {
1198 	/* Legacy key */
1199 	if (key_type < 0x03)
1200 		return true;
1201 
1202 	/* Debug keys are insecure so don't store them persistently */
1203 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1204 		return false;
1205 
1206 	/* Changed combination key and there's no previous one */
1207 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1208 		return false;
1209 
1210 	/* Security mode 3 case */
1211 	if (!conn)
1212 		return true;
1213 
1214 	/* Neither local nor remote side had no-bonding as a requirement */
1215 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1216 		return true;
1217 
1218 	/* Local side had dedicated bonding as requirement */
1219 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1220 		return true;
1221 
1222 	/* Remote side had dedicated bonding as requirement */
1223 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1224 		return true;
1225 
1226 	/* If none of the above criteria match, then don't store the key
1227 	 * persistently */
1228 	return false;
1229 }
1230 
1231 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1232 {
1233 	struct smp_ltk *k;
1234 
1235 	list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 		if (k->ediv != ediv ||
1237 				memcmp(rand, k->rand, sizeof(k->rand)))
1238 			continue;
1239 
1240 		return k;
1241 	}
1242 
1243 	return NULL;
1244 }
1245 EXPORT_SYMBOL(hci_find_ltk);
1246 
1247 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 				     u8 addr_type)
1249 {
1250 	struct smp_ltk *k;
1251 
1252 	list_for_each_entry(k, &hdev->long_term_keys, list)
1253 		if (addr_type == k->bdaddr_type &&
1254 					bacmp(bdaddr, &k->bdaddr) == 0)
1255 			return k;
1256 
1257 	return NULL;
1258 }
1259 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 
1261 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1263 {
1264 	struct link_key *key, *old_key;
1265 	u8 old_key_type;
1266 	bool persistent;
1267 
1268 	old_key = hci_find_link_key(hdev, bdaddr);
1269 	if (old_key) {
1270 		old_key_type = old_key->type;
1271 		key = old_key;
1272 	} else {
1273 		old_key_type = conn ? conn->key_type : 0xff;
1274 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1275 		if (!key)
1276 			return -ENOMEM;
1277 		list_add(&key->list, &hdev->link_keys);
1278 	}
1279 
1280 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1281 
1282 	/* Some buggy controller combinations generate a changed
1283 	 * combination key for legacy pairing even when there's no
1284 	 * previous key */
1285 	if (type == HCI_LK_CHANGED_COMBINATION &&
1286 					(!conn || conn->remote_auth == 0xff) &&
1287 					old_key_type == 0xff) {
1288 		type = HCI_LK_COMBINATION;
1289 		if (conn)
1290 			conn->key_type = type;
1291 	}
1292 
1293 	bacpy(&key->bdaddr, bdaddr);
1294 	memcpy(key->val, val, 16);
1295 	key->pin_len = pin_len;
1296 
1297 	if (type == HCI_LK_CHANGED_COMBINATION)
1298 		key->type = old_key_type;
1299 	else
1300 		key->type = type;
1301 
1302 	if (!new_key)
1303 		return 0;
1304 
1305 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1306 
1307 	mgmt_new_link_key(hdev, key, persistent);
1308 
1309 	if (conn)
1310 		conn->flush_key = !persistent;
1311 
1312 	return 0;
1313 }
1314 
1315 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1316 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1317 		ediv, u8 rand[8])
1318 {
1319 	struct smp_ltk *key, *old_key;
1320 
1321 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1322 		return 0;
1323 
1324 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1325 	if (old_key)
1326 		key = old_key;
1327 	else {
1328 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1329 		if (!key)
1330 			return -ENOMEM;
1331 		list_add(&key->list, &hdev->long_term_keys);
1332 	}
1333 
1334 	bacpy(&key->bdaddr, bdaddr);
1335 	key->bdaddr_type = addr_type;
1336 	memcpy(key->val, tk, sizeof(key->val));
1337 	key->authenticated = authenticated;
1338 	key->ediv = ediv;
1339 	key->enc_size = enc_size;
1340 	key->type = type;
1341 	memcpy(key->rand, rand, sizeof(key->rand));
1342 
1343 	if (!new_key)
1344 		return 0;
1345 
1346 	if (type & HCI_SMP_LTK)
1347 		mgmt_new_ltk(hdev, key, 1);
1348 
1349 	return 0;
1350 }
1351 
1352 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353 {
1354 	struct link_key *key;
1355 
1356 	key = hci_find_link_key(hdev, bdaddr);
1357 	if (!key)
1358 		return -ENOENT;
1359 
1360 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1361 
1362 	list_del(&key->list);
1363 	kfree(key);
1364 
1365 	return 0;
1366 }
1367 
1368 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369 {
1370 	struct smp_ltk *k, *tmp;
1371 
1372 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1373 		if (bacmp(bdaddr, &k->bdaddr))
1374 			continue;
1375 
1376 		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1377 
1378 		list_del(&k->list);
1379 		kfree(k);
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 /* HCI command timer function */
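/* Fires when a sent command gets no response from the controller: the
 * command credit count is forced back to 1 and the command work is
 * re-queued so that the next queued command can still go out.
 */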
1386 static void hci_cmd_timer(unsigned long arg)
1387 {
1388 	struct hci_dev *hdev = (void *) arg;
1389 
1390 	BT_ERR("%s command tx timeout", hdev->name);
1391 	atomic_set(&hdev->cmd_cnt, 1);
1392 	queue_work(hdev->workqueue, &hdev->cmd_work);
1393 }
1394 
1395 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1396 					  bdaddr_t *bdaddr)
1397 {
1398 	struct oob_data *data;
1399 
1400 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1401 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1402 			return data;
1403 
1404 	return NULL;
1405 }
1406 
1407 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1408 {
1409 	struct oob_data *data;
1410 
1411 	data = hci_find_remote_oob_data(hdev, bdaddr);
1412 	if (!data)
1413 		return -ENOENT;
1414 
1415 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1416 
1417 	list_del(&data->list);
1418 	kfree(data);
1419 
1420 	return 0;
1421 }
1422 
1423 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1424 {
1425 	struct oob_data *data, *n;
1426 
1427 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1428 		list_del(&data->list);
1429 		kfree(data);
1430 	}
1431 
1432 	return 0;
1433 }
1434 
1435 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1436 			    u8 *randomizer)
1437 {
1438 	struct oob_data *data;
1439 
1440 	data = hci_find_remote_oob_data(hdev, bdaddr);
1441 
1442 	if (!data) {
1443 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1444 		if (!data)
1445 			return -ENOMEM;
1446 
1447 		bacpy(&data->bdaddr, bdaddr);
1448 		list_add(&data->list, &hdev->remote_oob_data);
1449 	}
1450 
1451 	memcpy(data->hash, hash, sizeof(data->hash));
1452 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1453 
1454 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1455 
1456 	return 0;
1457 }
1458 
1459 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1460 {
1461 	struct bdaddr_list *b;
1462 
1463 	list_for_each_entry(b, &hdev->blacklist, list)
1464 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1465 			return b;
1466 
1467 	return NULL;
1468 }
1469 
1470 int hci_blacklist_clear(struct hci_dev *hdev)
1471 {
1472 	struct list_head *p, *n;
1473 
1474 	list_for_each_safe(p, n, &hdev->blacklist) {
1475 		struct bdaddr_list *b;
1476 
1477 		b = list_entry(p, struct bdaddr_list, list);
1478 
1479 		list_del(p);
1480 		kfree(b);
1481 	}
1482 
1483 	return 0;
1484 }
1485 
1486 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1487 {
1488 	struct bdaddr_list *entry;
1489 
1490 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1491 		return -EBADF;
1492 
1493 	if (hci_blacklist_lookup(hdev, bdaddr))
1494 		return -EEXIST;
1495 
1496 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1497 	if (!entry)
1498 		return -ENOMEM;
1499 
1500 	bacpy(&entry->bdaddr, bdaddr);
1501 
1502 	list_add(&entry->list, &hdev->blacklist);
1503 
1504 	return mgmt_device_blocked(hdev, bdaddr, type);
1505 }
1506 
1507 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1508 {
1509 	struct bdaddr_list *entry;
1510 
1511 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1512 		return hci_blacklist_clear(hdev);
1513 
1514 	entry = hci_blacklist_lookup(hdev, bdaddr);
1515 	if (!entry)
1516 		return -ENOENT;
1517 
1518 	list_del(&entry->list);
1519 	kfree(entry);
1520 
1521 	return mgmt_device_unblocked(hdev, bdaddr, type);
1522 }
1523 
1524 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1525 {
1526 	struct le_scan_params *param = (struct le_scan_params *) opt;
1527 	struct hci_cp_le_set_scan_param cp;
1528 
1529 	memset(&cp, 0, sizeof(cp));
1530 	cp.type = param->type;
1531 	cp.interval = cpu_to_le16(param->interval);
1532 	cp.window = cpu_to_le16(param->window);
1533 
1534 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1535 }
1536 
1537 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1538 {
1539 	struct hci_cp_le_set_scan_enable cp;
1540 
1541 	memset(&cp, 0, sizeof(cp));
1542 	cp.enable = 1;
1543 
1544 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545 }
1546 
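/* Synchronously set the LE scan parameters and enable scanning, then
 * schedule the le_scan_disable work to stop the scan again after
 * timeout milliseconds. Fails with -EINPROGRESS if a scan is already
 * active.
 */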
1547 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1548 			  u16 window, int timeout)
1549 {
1550 	long timeo = msecs_to_jiffies(3000);
1551 	struct le_scan_params param;
1552 	int err;
1553 
1554 	BT_DBG("%s", hdev->name);
1555 
1556 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1557 		return -EINPROGRESS;
1558 
1559 	param.type = type;
1560 	param.interval = interval;
1561 	param.window = window;
1562 
1563 	hci_req_lock(hdev);
1564 
1565 	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1566 			    timeo);
1567 	if (!err)
1568 		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1569 
1570 	hci_req_unlock(hdev);
1571 
1572 	if (err < 0)
1573 		return err;
1574 
1575 	schedule_delayed_work(&hdev->le_scan_disable,
1576 			      msecs_to_jiffies(timeout));
1577 
1578 	return 0;
1579 }
1580 
1581 int hci_cancel_le_scan(struct hci_dev *hdev)
1582 {
1583 	BT_DBG("%s", hdev->name);
1584 
1585 	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1586 		return -EALREADY;
1587 
1588 	if (cancel_delayed_work(&hdev->le_scan_disable)) {
1589 		struct hci_cp_le_set_scan_enable cp;
1590 
1591 		/* Send HCI command to disable LE Scan */
1592 		memset(&cp, 0, sizeof(cp));
1593 		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static void le_scan_disable_work(struct work_struct *work)
1600 {
1601 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1602 					    le_scan_disable.work);
1603 	struct hci_cp_le_set_scan_enable cp;
1604 
1605 	BT_DBG("%s", hdev->name);
1606 
1607 	memset(&cp, 0, sizeof(cp));
1608 
1609 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1610 }
1611 
1612 static void le_scan_work(struct work_struct *work)
1613 {
1614 	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1615 	struct le_scan_params *param = &hdev->le_scan_params;
1616 
1617 	BT_DBG("%s", hdev->name);
1618 
1619 	hci_do_le_scan(hdev, param->type, param->interval, param->window,
1620 		       param->timeout);
1621 }
1622 
1623 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1624 		int timeout)
1625 {
1626 	struct le_scan_params *param = &hdev->le_scan_params;
1627 
1628 	BT_DBG("%s", hdev->name);
1629 
1630 	if (work_busy(&hdev->le_scan))
1631 		return -EINPROGRESS;
1632 
1633 	param->type = type;
1634 	param->interval = interval;
1635 	param->window = window;
1636 	param->timeout = timeout;
1637 
1638 	queue_work(system_long_wq, &hdev->le_scan);
1639 
1640 	return 0;
1641 }
1642 
1643 /* Alloc HCI device */
1644 struct hci_dev *hci_alloc_dev(void)
1645 {
1646 	struct hci_dev *hdev;
1647 
1648 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1649 	if (!hdev)
1650 		return NULL;
1651 
1652 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1653 	hdev->esco_type = (ESCO_HV1);
1654 	hdev->link_mode = (HCI_LM_ACCEPT);
1655 	hdev->io_capability = 0x03; /* No Input No Output */
1656 
1657 	hdev->sniff_max_interval = 800;
1658 	hdev->sniff_min_interval = 80;
1659 
1660 	mutex_init(&hdev->lock);
1661 	mutex_init(&hdev->req_lock);
1662 
1663 	INIT_LIST_HEAD(&hdev->mgmt_pending);
1664 	INIT_LIST_HEAD(&hdev->blacklist);
1665 	INIT_LIST_HEAD(&hdev->uuids);
1666 	INIT_LIST_HEAD(&hdev->link_keys);
1667 	INIT_LIST_HEAD(&hdev->long_term_keys);
1668 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1669 
1670 	INIT_WORK(&hdev->rx_work, hci_rx_work);
1671 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1672 	INIT_WORK(&hdev->tx_work, hci_tx_work);
1673 	INIT_WORK(&hdev->power_on, hci_power_on);
1674 	INIT_WORK(&hdev->le_scan, le_scan_work);
1675 
1676 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1677 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1678 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1679 
1680 	skb_queue_head_init(&hdev->driver_init);
1681 	skb_queue_head_init(&hdev->rx_q);
1682 	skb_queue_head_init(&hdev->cmd_q);
1683 	skb_queue_head_init(&hdev->raw_q);
1684 
1685 	init_waitqueue_head(&hdev->req_wait_q);
1686 
1687 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1688 
1689 	hci_init_sysfs(hdev);
1690 	discovery_init(hdev);
1691 	hci_conn_hash_init(hdev);
1692 
1693 	return hdev;
1694 }
1695 EXPORT_SYMBOL(hci_alloc_dev);
1696 
1697 /* Free HCI device */
1698 void hci_free_dev(struct hci_dev *hdev)
1699 {
1700 	skb_queue_purge(&hdev->driver_init);
1701 
1702 	/* will be freed via the device release callback */
1703 	put_device(&hdev->dev);
1704 }
1705 EXPORT_SYMBOL(hci_free_dev);
1706 
1707 /* Register HCI device */
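/* Assigns the first free device id (AMP controllers start at 1 so that
 * the index can double as the AMP controller id), adds the device to
 * hci_dev_list, creates its workqueue, sysfs entries and rfkill switch,
 * and schedules the initial power-on.
 */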
1708 int hci_register_dev(struct hci_dev *hdev)
1709 {
1710 	struct list_head *head, *p;
1711 	int id, error;
1712 
1713 	if (!hdev->open || !hdev->close)
1714 		return -EINVAL;
1715 
1716 	write_lock(&hci_dev_list_lock);
1717 
1718 	/* Do not allow HCI_AMP devices to register at index 0,
1719 	 * so the index can be used as the AMP controller ID.
1720 	 */
1721 	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1722 	head = &hci_dev_list;
1723 
1724 	/* Find first available device id */
1725 	list_for_each(p, &hci_dev_list) {
1726 		int nid = list_entry(p, struct hci_dev, list)->id;
1727 		if (nid > id)
1728 			break;
1729 		if (nid == id)
1730 			id++;
1731 		head = p;
1732 	}
1733 
1734 	sprintf(hdev->name, "hci%d", id);
1735 	hdev->id = id;
1736 
1737 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 
1739 	list_add(&hdev->list, head);
1740 
1741 	write_unlock(&hci_dev_list_lock);
1742 
1743 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 							WQ_MEM_RECLAIM, 1);
1745 	if (!hdev->workqueue) {
1746 		error = -ENOMEM;
1747 		goto err;
1748 	}
1749 
1750 	error = hci_add_sysfs(hdev);
1751 	if (error < 0)
1752 		goto err_wqueue;
1753 
1754 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1756 	if (hdev->rfkill) {
1757 		if (rfkill_register(hdev->rfkill) < 0) {
1758 			rfkill_destroy(hdev->rfkill);
1759 			hdev->rfkill = NULL;
1760 		}
1761 	}
1762 
1763 	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1764 	set_bit(HCI_SETUP, &hdev->dev_flags);
1765 	schedule_work(&hdev->power_on);
1766 
1767 	hci_notify(hdev, HCI_DEV_REG);
1768 	hci_dev_hold(hdev);
1769 
1770 	return id;
1771 
1772 err_wqueue:
1773 	destroy_workqueue(hdev->workqueue);
1774 err:
1775 	write_lock(&hci_dev_list_lock);
1776 	list_del(&hdev->list);
1777 	write_unlock(&hci_dev_list_lock);
1778 
1779 	return error;
1780 }
1781 EXPORT_SYMBOL(hci_register_dev);
1782 
1783 /* Unregister HCI device */
1784 void hci_unregister_dev(struct hci_dev *hdev)
1785 {
1786 	int i;
1787 
1788 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 
1790 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 
1792 	write_lock(&hci_dev_list_lock);
1793 	list_del(&hdev->list);
1794 	write_unlock(&hci_dev_list_lock);
1795 
1796 	hci_dev_do_close(hdev);
1797 
1798 	for (i = 0; i < NUM_REASSEMBLY; i++)
1799 		kfree_skb(hdev->reassembly[i]);
1800 
1801 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 		hci_dev_lock(hdev);
1804 		mgmt_index_removed(hdev);
1805 		hci_dev_unlock(hdev);
1806 	}
1807 
1808 	/* mgmt_index_removed should take care of emptying the
1809 	 * pending list */
1810 	BUG_ON(!list_empty(&hdev->mgmt_pending));
1811 
1812 	hci_notify(hdev, HCI_DEV_UNREG);
1813 
1814 	if (hdev->rfkill) {
1815 		rfkill_unregister(hdev->rfkill);
1816 		rfkill_destroy(hdev->rfkill);
1817 	}
1818 
1819 	hci_del_sysfs(hdev);
1820 
1821 	destroy_workqueue(hdev->workqueue);
1822 
1823 	hci_dev_lock(hdev);
1824 	hci_blacklist_clear(hdev);
1825 	hci_uuids_clear(hdev);
1826 	hci_link_keys_clear(hdev);
1827 	hci_smp_ltks_clear(hdev);
1828 	hci_remote_oob_data_clear(hdev);
1829 	hci_dev_unlock(hdev);
1830 
1831 	hci_dev_put(hdev);
1832 }
1833 EXPORT_SYMBOL(hci_unregister_dev);
1834 
1835 /* Suspend HCI device */
1836 int hci_suspend_dev(struct hci_dev *hdev)
1837 {
1838 	hci_notify(hdev, HCI_DEV_SUSPEND);
1839 	return 0;
1840 }
1841 EXPORT_SYMBOL(hci_suspend_dev);
1842 
1843 /* Resume HCI device */
1844 int hci_resume_dev(struct hci_dev *hdev)
1845 {
1846 	hci_notify(hdev, HCI_DEV_RESUME);
1847 	return 0;
1848 }
1849 EXPORT_SYMBOL(hci_resume_dev);
1850 
1851 /* Receive frame from HCI drivers */
1852 int hci_recv_frame(struct sk_buff *skb)
1853 {
1854 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1857 		kfree_skb(skb);
1858 		return -ENXIO;
1859 	}
1860 
1861 	/* Incoming skb */
1862 	bt_cb(skb)->incoming = 1;
1863 
1864 	/* Time stamp */
1865 	__net_timestamp(skb);
1866 
1867 	skb_queue_tail(&hdev->rx_q, skb);
1868 	queue_work(hdev->workqueue, &hdev->rx_work);
1869 
1870 	return 0;
1871 }
1872 EXPORT_SYMBOL(hci_recv_frame);
1873 
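/* Reassemble a packet that a driver delivers in arbitrary chunks. The
 * partially built skb is kept in hdev->reassembly[index]; once the
 * header is complete the expected payload length is read from it, and
 * when the frame is complete it is handed to hci_recv_frame(). Returns
 * the number of bytes of data that were not consumed, or a negative
 * error.
 */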
1874 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 						  int count, __u8 index)
1876 {
1877 	int len = 0;
1878 	int hlen = 0;
1879 	int remain = count;
1880 	struct sk_buff *skb;
1881 	struct bt_skb_cb *scb;
1882 
1883 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 				index >= NUM_REASSEMBLY)
1885 		return -EILSEQ;
1886 
1887 	skb = hdev->reassembly[index];
1888 
1889 	if (!skb) {
1890 		switch (type) {
1891 		case HCI_ACLDATA_PKT:
1892 			len = HCI_MAX_FRAME_SIZE;
1893 			hlen = HCI_ACL_HDR_SIZE;
1894 			break;
1895 		case HCI_EVENT_PKT:
1896 			len = HCI_MAX_EVENT_SIZE;
1897 			hlen = HCI_EVENT_HDR_SIZE;
1898 			break;
1899 		case HCI_SCODATA_PKT:
1900 			len = HCI_MAX_SCO_SIZE;
1901 			hlen = HCI_SCO_HDR_SIZE;
1902 			break;
1903 		}
1904 
1905 		skb = bt_skb_alloc(len, GFP_ATOMIC);
1906 		if (!skb)
1907 			return -ENOMEM;
1908 
1909 		scb = (void *) skb->cb;
1910 		scb->expect = hlen;
1911 		scb->pkt_type = type;
1912 
1913 		skb->dev = (void *) hdev;
1914 		hdev->reassembly[index] = skb;
1915 	}
1916 
1917 	while (count) {
1918 		scb = (void *) skb->cb;
1919 		len = min_t(uint, scb->expect, count);
1920 
1921 		memcpy(skb_put(skb, len), data, len);
1922 
1923 		count -= len;
1924 		data += len;
1925 		scb->expect -= len;
1926 		remain = count;
1927 
1928 		switch (type) {
1929 		case HCI_EVENT_PKT:
1930 			if (skb->len == HCI_EVENT_HDR_SIZE) {
1931 				struct hci_event_hdr *h = hci_event_hdr(skb);
1932 				scb->expect = h->plen;
1933 
1934 				if (skb_tailroom(skb) < scb->expect) {
1935 					kfree_skb(skb);
1936 					hdev->reassembly[index] = NULL;
1937 					return -ENOMEM;
1938 				}
1939 			}
1940 			break;
1941 
1942 		case HCI_ACLDATA_PKT:
1943 			if (skb->len  == HCI_ACL_HDR_SIZE) {
1944 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
1945 				scb->expect = __le16_to_cpu(h->dlen);
1946 
1947 				if (skb_tailroom(skb) < scb->expect) {
1948 					kfree_skb(skb);
1949 					hdev->reassembly[index] = NULL;
1950 					return -ENOMEM;
1951 				}
1952 			}
1953 			break;
1954 
1955 		case HCI_SCODATA_PKT:
1956 			if (skb->len == HCI_SCO_HDR_SIZE) {
1957 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
1958 				scb->expect = h->dlen;
1959 
1960 				if (skb_tailroom(skb) < scb->expect) {
1961 					kfree_skb(skb);
1962 					hdev->reassembly[index] = NULL;
1963 					return -ENOMEM;
1964 				}
1965 			}
1966 			break;
1967 		}
1968 
1969 		if (scb->expect == 0) {
1970 			/* Complete frame */
1971 
1972 			bt_cb(skb)->pkt_type = type;
1973 			hci_recv_frame(skb);
1974 
1975 			hdev->reassembly[index] = NULL;
1976 			return remain;
1977 		}
1978 	}
1979 
1980 	return remain;
1981 }
1982 
1983 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1984 {
1985 	int rem = 0;
1986 
1987 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1988 		return -EILSEQ;
1989 
1990 	while (count) {
1991 		rem = hci_reassembly(hdev, type, data, count, type - 1);
1992 		if (rem < 0)
1993 			return rem;
1994 
1995 		data += (count - rem);
1996 		count = rem;
1997 	}
1998 
1999 	return rem;
2000 }
2001 EXPORT_SYMBOL(hci_recv_fragment);
2002 
2003 #define STREAM_REASSEMBLY 0
2004 
2005 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2006 {
2007 	int type;
2008 	int rem = 0;
2009 
2010 	while (count) {
2011 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2012 
2013 		if (!skb) {
2014 			struct { char type; } *pkt;
2015 
2016 			/* Start of the frame */
2017 			pkt = data;
2018 			type = pkt->type;
2019 
2020 			data++;
2021 			count--;
2022 		} else
2023 			type = bt_cb(skb)->pkt_type;
2024 
2025 		rem = hci_reassembly(hdev, type, data, count,
2026 							STREAM_REASSEMBLY);
2027 		if (rem < 0)
2028 			return rem;
2029 
2030 		data += (count - rem);
2031 		count = rem;
2032 	}
2033 
2034 	return rem;
2035 }
2036 EXPORT_SYMBOL(hci_recv_stream_fragment);
2037 
2038 /* ---- Interface to upper protocols ---- */
2039 
2040 int hci_register_cb(struct hci_cb *cb)
2041 {
2042 	BT_DBG("%p name %s", cb, cb->name);
2043 
2044 	write_lock(&hci_cb_list_lock);
2045 	list_add(&cb->list, &hci_cb_list);
2046 	write_unlock(&hci_cb_list_lock);
2047 
2048 	return 0;
2049 }
2050 EXPORT_SYMBOL(hci_register_cb);
2051 
2052 int hci_unregister_cb(struct hci_cb *cb)
2053 {
2054 	BT_DBG("%p name %s", cb, cb->name);
2055 
2056 	write_lock(&hci_cb_list_lock);
2057 	list_del(&cb->list);
2058 	write_unlock(&hci_cb_list_lock);
2059 
2060 	return 0;
2061 }
2062 EXPORT_SYMBOL(hci_unregister_cb);
2063 
2064 static int hci_send_frame(struct sk_buff *skb)
2065 {
2066 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2067 
2068 	if (!hdev) {
2069 		kfree_skb(skb);
2070 		return -ENODEV;
2071 	}
2072 
2073 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2074 
2075 	/* Time stamp */
2076 	__net_timestamp(skb);
2077 
2078 	/* Send copy to monitor */
2079 	hci_send_to_monitor(hdev, skb);
2080 
2081 	if (atomic_read(&hdev->promisc)) {
2082 		/* Send copy to the sockets */
2083 		hci_send_to_sock(hdev, skb);
2084 	}
2085 
2086 	/* Get rid of skb owner, prior to sending to the driver. */
2087 	skb_orphan(skb);
2088 
2089 	return hdev->send(skb);
2090 }
2091 
2092 /* Send HCI command */
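/* Builds an skb with the command header and parameters, remembers the
 * opcode as init_last_cmd while in the init phase, and queues it on
 * cmd_q, scheduling the command work to transmit it.
 */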
2093 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2094 {
2095 	int len = HCI_COMMAND_HDR_SIZE + plen;
2096 	struct hci_command_hdr *hdr;
2097 	struct sk_buff *skb;
2098 
2099 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2100 
2101 	skb = bt_skb_alloc(len, GFP_ATOMIC);
2102 	if (!skb) {
2103 		BT_ERR("%s no memory for command", hdev->name);
2104 		return -ENOMEM;
2105 	}
2106 
2107 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2108 	hdr->opcode = cpu_to_le16(opcode);
2109 	hdr->plen   = plen;
2110 
2111 	if (plen)
2112 		memcpy(skb_put(skb, plen), param, plen);
2113 
2114 	BT_DBG("skb len %d", skb->len);
2115 
2116 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2117 	skb->dev = (void *) hdev;
2118 
2119 	if (test_bit(HCI_INIT, &hdev->flags))
2120 		hdev->init_last_cmd = opcode;
2121 
2122 	skb_queue_tail(&hdev->cmd_q, skb);
2123 	queue_work(hdev->workqueue, &hdev->cmd_work);
2124 
2125 	return 0;
2126 }
2127 
2128 /* Get data from the previously sent command */
2129 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2130 {
2131 	struct hci_command_hdr *hdr;
2132 
2133 	if (!hdev->sent_cmd)
2134 		return NULL;
2135 
2136 	hdr = (void *) hdev->sent_cmd->data;
2137 
2138 	if (hdr->opcode != cpu_to_le16(opcode))
2139 		return NULL;
2140 
2141 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2142 
2143 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2144 }
2145 
2146 /* Send ACL data */
2147 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2148 {
2149 	struct hci_acl_hdr *hdr;
2150 	int len = skb->len;
2151 
2152 	skb_push(skb, HCI_ACL_HDR_SIZE);
2153 	skb_reset_transport_header(skb);
2154 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2155 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2156 	hdr->dlen   = cpu_to_le16(len);
2157 }
2158 
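/* Queue an ACL frame, splitting a fragmented skb (frag_list) into
 * individual packets: the first keeps the caller's boundary flags, the
 * rest are marked ACL_CONT, and all fragments are queued atomically so
 * they stay contiguous on the data queue.
 */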
2159 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 				struct sk_buff *skb, __u16 flags)
2161 {
2162 	struct hci_dev *hdev = conn->hdev;
2163 	struct sk_buff *list;
2164 
2165 	skb->len = skb_headlen(skb);
2166 	skb->data_len = 0;
2167 
2168 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2169 	hci_add_acl_hdr(skb, conn->handle, flags);
2170 
2171 	list = skb_shinfo(skb)->frag_list;
2172 	if (!list) {
2173 		/* Non-fragmented */
2174 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2175 
2176 		skb_queue_tail(queue, skb);
2177 	} else {
2178 		/* Fragmented */
2179 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2180 
2181 		skb_shinfo(skb)->frag_list = NULL;
2182 
2183 		/* Queue all fragments atomically */
2184 		spin_lock(&queue->lock);
2185 
2186 		__skb_queue_tail(queue, skb);
2187 
2188 		flags &= ~ACL_START;
2189 		flags |= ACL_CONT;
2190 		do {
2191 			skb = list; list = list->next;
2192 
2193 			skb->dev = (void *) hdev;
2194 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2195 			hci_add_acl_hdr(skb, conn->handle, flags);
2196 
2197 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2198 
2199 			__skb_queue_tail(queue, skb);
2200 		} while (list);
2201 
2202 		spin_unlock(&queue->lock);
2203 	}
2204 }
2205 
2206 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2207 {
2208 	struct hci_conn *conn = chan->conn;
2209 	struct hci_dev *hdev = conn->hdev;
2210 
2211 	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2212 
2213 	skb->dev = (void *) hdev;
2214 
2215 	hci_queue_acl(conn, &chan->data_q, skb, flags);
2216 
2217 	queue_work(hdev->workqueue, &hdev->tx_work);
2218 }
2219 EXPORT_SYMBOL(hci_send_acl);
2220 
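/*
 * Illustrative sketch (editor's note): L2CAP is the primary caller.  It
 * allocates the payload with bt_skb_alloc(), sets the scheduling hints
 * carried in the skb and hands the frame to hci_send_acl(); any
 * fragmentation to the ACL MTU is expressed through the skb's
 * frag_list, which hci_queue_acl() above flattens into individual
 * frames.  Roughly, with 'hchan' standing for the hci_chan created
 * earlier via hci_chan_create():
 *
 *	skb->priority = HCI_PRIO_MAX - 1;
 *	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
 *
 *	hci_send_acl(hchan, skb, ACL_START);
 */
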
2221 /* Send SCO data */
2222 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2223 {
2224 	struct hci_dev *hdev = conn->hdev;
2225 	struct hci_sco_hdr hdr;
2226 
2227 	BT_DBG("%s len %d", hdev->name, skb->len);
2228 
2229 	hdr.handle = cpu_to_le16(conn->handle);
2230 	hdr.dlen   = skb->len;
2231 
2232 	skb_push(skb, HCI_SCO_HDR_SIZE);
2233 	skb_reset_transport_header(skb);
2234 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2235 
2236 	skb->dev = (void *) hdev;
2237 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2238 
2239 	skb_queue_tail(&conn->data_q, skb);
2240 	queue_work(hdev->workqueue, &hdev->tx_work);
2241 }
2242 EXPORT_SYMBOL(hci_send_sco);
2243 
2244 /* ---- HCI TX task (outgoing data) ---- */
2245 
2246 /* HCI Connection scheduler */
2247 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2248 {
2249 	struct hci_conn_hash *h = &hdev->conn_hash;
2250 	struct hci_conn *conn = NULL, *c;
2251 	unsigned int num = 0, min = ~0;
2252 
2253 	/* We don't have to lock the device here. Connections are always
2254 	 * added and removed with the TX task disabled. */
2255 
2256 	rcu_read_lock();
2257 
2258 	list_for_each_entry_rcu(c, &h->list, list) {
2259 		if (c->type != type || skb_queue_empty(&c->data_q))
2260 			continue;
2261 
2262 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2263 			continue;
2264 
2265 		num++;
2266 
2267 		if (c->sent < min) {
2268 			min  = c->sent;
2269 			conn = c;
2270 		}
2271 
2272 		if (hci_conn_num(hdev, type) == num)
2273 			break;
2274 	}
2275 
2276 	rcu_read_unlock();
2277 
2278 	if (conn) {
2279 		int cnt, q;
2280 
2281 		switch (conn->type) {
2282 		case ACL_LINK:
2283 			cnt = hdev->acl_cnt;
2284 			break;
2285 		case SCO_LINK:
2286 		case ESCO_LINK:
2287 			cnt = hdev->sco_cnt;
2288 			break;
2289 		case LE_LINK:
2290 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2291 			break;
2292 		default:
2293 			cnt = 0;
2294 			BT_ERR("Unknown link type");
2295 		}
2296 
2297 		q = cnt / num;
2298 		*quote = q ? q : 1;
2299 	} else
2300 		*quote = 0;
2301 
2302 	BT_DBG("conn %p quote %d", conn, *quote);
2303 	return conn;
2304 }
2305 
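/*
 * Worked example (editor's note): with hdev->sco_cnt == 6 and three
 * SCO connections holding queued data, the connection with the lowest
 * 'sent' count is picked and given a quote of 6 / 3 = 2 packets; when
 * the integer division rounds down to zero, the quote is clamped to 1
 * so the chosen connection always makes progress.
 */
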
2306 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307 {
2308 	struct hci_conn_hash *h = &hdev->conn_hash;
2309 	struct hci_conn *c;
2310 
2311 	BT_ERR("%s link tx timeout", hdev->name);
2312 
2313 	rcu_read_lock();
2314 
2315 	/* Kill stalled connections */
2316 	list_for_each_entry_rcu(c, &h->list, list) {
2317 		if (c->type == type && c->sent) {
2318 			BT_ERR("%s killing stalled connection %s",
2319 				hdev->name, batostr(&c->dst));
2320 			hci_acl_disconn(c, 0x13);
2321 		}
2322 	}
2323 
2324 	rcu_read_unlock();
2325 }
2326 
2327 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 						int *quote)
2329 {
2330 	struct hci_conn_hash *h = &hdev->conn_hash;
2331 	struct hci_chan *chan = NULL;
2332 	unsigned int num = 0, min = ~0, cur_prio = 0;
2333 	struct hci_conn *conn;
2334 	int cnt, q, conn_num = 0;
2335 
2336 	BT_DBG("%s", hdev->name);
2337 
2338 	rcu_read_lock();
2339 
2340 	list_for_each_entry_rcu(conn, &h->list, list) {
2341 		struct hci_chan *tmp;
2342 
2343 		if (conn->type != type)
2344 			continue;
2345 
2346 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2347 			continue;
2348 
2349 		conn_num++;
2350 
2351 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2352 			struct sk_buff *skb;
2353 
2354 			if (skb_queue_empty(&tmp->data_q))
2355 				continue;
2356 
2357 			skb = skb_peek(&tmp->data_q);
2358 			if (skb->priority < cur_prio)
2359 				continue;
2360 
2361 			if (skb->priority > cur_prio) {
2362 				num = 0;
2363 				min = ~0;
2364 				cur_prio = skb->priority;
2365 			}
2366 
2367 			num++;
2368 
2369 			if (conn->sent < min) {
2370 				min  = conn->sent;
2371 				chan = tmp;
2372 			}
2373 		}
2374 
2375 		if (hci_conn_num(hdev, type) == conn_num)
2376 			break;
2377 	}
2378 
2379 	rcu_read_unlock();
2380 
2381 	if (!chan)
2382 		return NULL;
2383 
2384 	switch (chan->conn->type) {
2385 	case ACL_LINK:
2386 		cnt = hdev->acl_cnt;
2387 		break;
2388 	case SCO_LINK:
2389 	case ESCO_LINK:
2390 		cnt = hdev->sco_cnt;
2391 		break;
2392 	case LE_LINK:
2393 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2394 		break;
2395 	default:
2396 		cnt = 0;
2397 		BT_ERR("Unknown link type");
2398 	}
2399 
2400 	q = cnt / num;
2401 	*quote = q ? q : 1;
2402 	BT_DBG("chan %p quote %d", chan, *quote);
2403 	return chan;
2404 }
2405 
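/*
 * Editor's note: unlike hci_low_sent() above, this scheduler works at
 * hci_chan granularity and is strictly priority ordered.  Only channels
 * whose head-of-queue skb carries the highest priority seen in this
 * pass compete, and among those the one on the least-loaded connection
 * (lowest conn->sent) wins.  The quote is then derived from the free
 * controller buffers exactly as in hci_low_sent().
 */
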
2406 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2407 {
2408 	struct hci_conn_hash *h = &hdev->conn_hash;
2409 	struct hci_conn *conn;
2410 	int num = 0;
2411 
2412 	BT_DBG("%s", hdev->name);
2413 
2414 	rcu_read_lock();
2415 
2416 	list_for_each_entry_rcu(conn, &h->list, list) {
2417 		struct hci_chan *chan;
2418 
2419 		if (conn->type != type)
2420 			continue;
2421 
2422 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2423 			continue;
2424 
2425 		num++;
2426 
2427 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2428 			struct sk_buff *skb;
2429 
2430 			if (chan->sent) {
2431 				chan->sent = 0;
2432 				continue;
2433 			}
2434 
2435 			if (skb_queue_empty(&chan->data_q))
2436 				continue;
2437 
2438 			skb = skb_peek(&chan->data_q);
2439 			if (skb->priority >= HCI_PRIO_MAX - 1)
2440 				continue;
2441 
2442 			skb->priority = HCI_PRIO_MAX - 1;
2443 
2444 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 								skb->priority);
2446 		}
2447 
2448 		if (hci_conn_num(hdev, type) == num)
2449 			break;
2450 	}
2451 
2452 	rcu_read_unlock();
2453 
2454 }
2455 
2456 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2457 {
2458 	/* Calculate count of blocks used by this packet */
2459 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460 }
2461 
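/*
 * Worked example (editor's note): with block based flow control the
 * controller accounts in fixed-size data blocks rather than packets.
 * With hdev->block_len == 256, an ACL frame of 1028 bytes (4 byte ACL
 * header + 1024 bytes payload) costs DIV_ROUND_UP(1024, 256) = 4
 * blocks against hdev->block_cnt.
 */
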
2462 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463 {
2464 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 		/* The ACL tx timeout must be longer than the maximum
2466 		 * link supervision timeout (40.9 seconds). */
2467 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2469 			hci_link_tx_to(hdev, ACL_LINK);
2470 	}
2471 }
2472 
2473 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2474 {
2475 	unsigned int cnt = hdev->acl_cnt;
2476 	struct hci_chan *chan;
2477 	struct sk_buff *skb;
2478 	int quote;
2479 
2480 	__check_timeout(hdev, cnt);
2481 
2482 	while (hdev->acl_cnt &&
2483 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 		u32 priority = (skb_peek(&chan->data_q))->priority;
2485 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 					skb->len, skb->priority);
2488 
2489 			/* Stop if priority has changed */
2490 			if (skb->priority < priority)
2491 				break;
2492 
2493 			skb = skb_dequeue(&chan->data_q);
2494 
2495 			hci_conn_enter_active_mode(chan->conn,
2496 						   bt_cb(skb)->force_active);
2497 
2498 			hci_send_frame(skb);
2499 			hdev->acl_last_tx = jiffies;
2500 
2501 			hdev->acl_cnt--;
2502 			chan->sent++;
2503 			chan->conn->sent++;
2504 		}
2505 	}
2506 
2507 	if (cnt != hdev->acl_cnt)
2508 		hci_prio_recalculate(hdev, ACL_LINK);
2509 }
2510 
2511 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2512 {
2513 	unsigned int cnt = hdev->block_cnt;
2514 	struct hci_chan *chan;
2515 	struct sk_buff *skb;
2516 	int quote;
2517 
2518 	__check_timeout(hdev, cnt);
2519 
2520 	while (hdev->block_cnt > 0 &&
2521 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 		u32 priority = (skb_peek(&chan->data_q))->priority;
2523 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 			int blocks;
2525 
2526 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 						skb->len, skb->priority);
2528 
2529 			/* Stop if priority has changed */
2530 			if (skb->priority < priority)
2531 				break;
2532 
2533 			skb = skb_dequeue(&chan->data_q);
2534 
2535 			blocks = __get_blocks(hdev, skb);
2536 			if (blocks > hdev->block_cnt)
2537 				return;
2538 
2539 			hci_conn_enter_active_mode(chan->conn,
2540 						bt_cb(skb)->force_active);
2541 
2542 			hci_send_frame(skb);
2543 			hdev->acl_last_tx = jiffies;
2544 
2545 			hdev->block_cnt -= blocks;
2546 			quote -= blocks;
2547 
2548 			chan->sent += blocks;
2549 			chan->conn->sent += blocks;
2550 		}
2551 	}
2552 
2553 	if (cnt != hdev->block_cnt)
2554 		hci_prio_recalculate(hdev, ACL_LINK);
2555 }
2556 
2557 static inline void hci_sched_acl(struct hci_dev *hdev)
2558 {
2559 	BT_DBG("%s", hdev->name);
2560 
2561 	if (!hci_conn_num(hdev, ACL_LINK))
2562 		return;
2563 
2564 	switch (hdev->flow_ctl_mode) {
2565 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
2566 		hci_sched_acl_pkt(hdev);
2567 		break;
2568 
2569 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2570 		hci_sched_acl_blk(hdev);
2571 		break;
2572 	}
2573 }
2574 
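/*
 * Editor's note: hdev->flow_ctl_mode mirrors which buffer accounting
 * the controller reported during init -- per-packet credits (the
 * default, from Read Buffer Size) or per-block credits (from Read Data
 * Block Size when block based flow control is in use) -- and selects
 * the matching ACL scheduler above.
 */
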
2575 /* Schedule SCO */
2576 static inline void hci_sched_sco(struct hci_dev *hdev)
2577 {
2578 	struct hci_conn *conn;
2579 	struct sk_buff *skb;
2580 	int quote;
2581 
2582 	BT_DBG("%s", hdev->name);
2583 
2584 	if (!hci_conn_num(hdev, SCO_LINK))
2585 		return;
2586 
2587 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2588 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2589 			BT_DBG("skb %p len %d", skb, skb->len);
2590 			hci_send_frame(skb);
2591 
2592 			conn->sent++;
2593 			if (conn->sent == ~0)
2594 				conn->sent = 0;
2595 		}
2596 	}
2597 }
2598 
2599 static inline void hci_sched_esco(struct hci_dev *hdev)
2600 {
2601 	struct hci_conn *conn;
2602 	struct sk_buff *skb;
2603 	int quote;
2604 
2605 	BT_DBG("%s", hdev->name);
2606 
2607 	if (!hci_conn_num(hdev, ESCO_LINK))
2608 		return;
2609 
2610 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2611 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 			BT_DBG("skb %p len %d", skb, skb->len);
2613 			hci_send_frame(skb);
2614 
2615 			conn->sent++;
2616 			if (conn->sent == ~0)
2617 				conn->sent = 0;
2618 		}
2619 	}
2620 }
2621 
2622 static inline void hci_sched_le(struct hci_dev *hdev)
2623 {
2624 	struct hci_chan *chan;
2625 	struct sk_buff *skb;
2626 	int quote, cnt, tmp;
2627 
2628 	BT_DBG("%s", hdev->name);
2629 
2630 	if (!hci_conn_num(hdev, LE_LINK))
2631 		return;
2632 
2633 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2634 		/* The LE tx timeout must be longer than the maximum
2635 		 * link supervision timeout (40.9 seconds). */
2636 		if (!hdev->le_cnt && hdev->le_pkts &&
2637 				time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 			hci_link_tx_to(hdev, LE_LINK);
2639 	}
2640 
2641 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2642 	tmp = cnt;
2643 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2644 		u32 priority = (skb_peek(&chan->data_q))->priority;
2645 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 					skb->len, skb->priority);
2648 
2649 			/* Stop if priority has changed */
2650 			if (skb->priority < priority)
2651 				break;
2652 
2653 			skb = skb_dequeue(&chan->data_q);
2654 
2655 			hci_send_frame(skb);
2656 			hdev->le_last_tx = jiffies;
2657 
2658 			cnt--;
2659 			chan->sent++;
2660 			chan->conn->sent++;
2661 		}
2662 	}
2663 
2664 	if (hdev->le_pkts)
2665 		hdev->le_cnt = cnt;
2666 	else
2667 		hdev->acl_cnt = cnt;
2668 
2669 	if (cnt != tmp)
2670 		hci_prio_recalculate(hdev, LE_LINK);
2671 }
2672 
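/*
 * Editor's note: controllers that report zero LE buffers in the LE
 * Read Buffer Size response share the ACL buffer pool for LE traffic,
 * which is why the accounting above falls back to acl_cnt whenever
 * hdev->le_pkts is zero.
 */
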
2673 static void hci_tx_work(struct work_struct *work)
2674 {
2675 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2676 	struct sk_buff *skb;
2677 
2678 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 		hdev->sco_cnt, hdev->le_cnt);
2680 
2681 	/* Schedule queues and send pending frames to the HCI driver */
2682 
2683 	hci_sched_acl(hdev);
2684 
2685 	hci_sched_sco(hdev);
2686 
2687 	hci_sched_esco(hdev);
2688 
2689 	hci_sched_le(hdev);
2690 
2691 	/* Send next queued raw (unknown type) packet */
2692 	while ((skb = skb_dequeue(&hdev->raw_q)))
2693 		hci_send_frame(skb);
2694 }
2695 
2696 /* ----- HCI RX task (incoming data processing) ----- */
2697 
2698 /* ACL data packet */
2699 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700 {
2701 	struct hci_acl_hdr *hdr = (void *) skb->data;
2702 	struct hci_conn *conn;
2703 	__u16 handle, flags;
2704 
2705 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2706 
2707 	handle = __le16_to_cpu(hdr->handle);
2708 	flags  = hci_flags(handle);
2709 	handle = hci_handle(handle);
2710 
2711 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2712 
2713 	hdev->stat.acl_rx++;
2714 
2715 	hci_dev_lock(hdev);
2716 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2717 	hci_dev_unlock(hdev);
2718 
2719 	if (conn) {
2720 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2721 
2722 		hci_dev_lock(hdev);
2723 		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2724 		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2725 			mgmt_device_connected(hdev, &conn->dst, conn->type,
2726 					      conn->dst_type, 0, NULL, 0,
2727 					      conn->dev_class);
2728 		hci_dev_unlock(hdev);
2729 
2730 		/* Send to upper protocol */
2731 		l2cap_recv_acldata(conn, skb, flags);
2732 		return;
2733 	} else {
2734 		BT_ERR("%s ACL packet for unknown connection handle %d",
2735 			hdev->name, handle);
2736 	}
2737 
2738 	kfree_skb(skb);
2739 }
2740 
2741 /* SCO data packet */
2742 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743 {
2744 	struct hci_sco_hdr *hdr = (void *) skb->data;
2745 	struct hci_conn *conn;
2746 	__u16 handle;
2747 
2748 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2749 
2750 	handle = __le16_to_cpu(hdr->handle);
2751 
2752 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2753 
2754 	hdev->stat.sco_rx++;
2755 
2756 	hci_dev_lock(hdev);
2757 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2758 	hci_dev_unlock(hdev);
2759 
2760 	if (conn) {
2761 		/* Send to upper protocol */
2762 		sco_recv_scodata(conn, skb);
2763 		return;
2764 	} else {
2765 		BT_ERR("%s SCO packet for unknown connection handle %d",
2766 			hdev->name, handle);
2767 	}
2768 
2769 	kfree_skb(skb);
2770 }
2771 
2772 static void hci_rx_work(struct work_struct *work)
2773 {
2774 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2775 	struct sk_buff *skb;
2776 
2777 	BT_DBG("%s", hdev->name);
2778 
2779 	while ((skb = skb_dequeue(&hdev->rx_q))) {
2780 		/* Send copy to monitor */
2781 		hci_send_to_monitor(hdev, skb);
2782 
2783 		if (atomic_read(&hdev->promisc)) {
2784 			/* Send copy to the sockets */
2785 			hci_send_to_sock(hdev, skb);
2786 		}
2787 
2788 		if (test_bit(HCI_RAW, &hdev->flags)) {
2789 			kfree_skb(skb);
2790 			continue;
2791 		}
2792 
2793 		if (test_bit(HCI_INIT, &hdev->flags)) {
2794 			/* Don't process data packets in this state. */
2795 			switch (bt_cb(skb)->pkt_type) {
2796 			case HCI_ACLDATA_PKT:
2797 			case HCI_SCODATA_PKT:
2798 				kfree_skb(skb);
2799 				continue;
2800 			}
2801 		}
2802 
2803 		/* Process frame */
2804 		switch (bt_cb(skb)->pkt_type) {
2805 		case HCI_EVENT_PKT:
2806 			BT_DBG("%s Event packet", hdev->name);
2807 			hci_event_packet(hdev, skb);
2808 			break;
2809 
2810 		case HCI_ACLDATA_PKT:
2811 			BT_DBG("%s ACL data packet", hdev->name);
2812 			hci_acldata_packet(hdev, skb);
2813 			break;
2814 
2815 		case HCI_SCODATA_PKT:
2816 			BT_DBG("%s SCO data packet", hdev->name);
2817 			hci_scodata_packet(hdev, skb);
2818 			break;
2819 
2820 		default:
2821 			kfree_skb(skb);
2822 			break;
2823 		}
2824 	}
2825 }
2826 
2827 static void hci_cmd_work(struct work_struct *work)
2828 {
2829 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2830 	struct sk_buff *skb;
2831 
2832 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2833 
2834 	/* Send queued commands */
2835 	if (atomic_read(&hdev->cmd_cnt)) {
2836 		skb = skb_dequeue(&hdev->cmd_q);
2837 		if (!skb)
2838 			return;
2839 
2840 		kfree_skb(hdev->sent_cmd);
2841 
2842 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2843 		if (hdev->sent_cmd) {
2844 			atomic_dec(&hdev->cmd_cnt);
2845 			hci_send_frame(skb);
2846 			if (test_bit(HCI_RESET, &hdev->flags))
2847 				del_timer(&hdev->cmd_timer);
2848 			else
2849 				mod_timer(&hdev->cmd_timer,
2850 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2851 		} else {
2852 			skb_queue_head(&hdev->cmd_q, skb);
2853 			queue_work(hdev->workqueue, &hdev->cmd_work);
2854 		}
2855 	}
2856 }
2857 
2858 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2859 {
2860 	/* General inquiry access code (GIAC) */
2861 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2862 	struct hci_cp_inquiry cp;
2863 
2864 	BT_DBG("%s", hdev->name);
2865 
2866 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2867 		return -EINPROGRESS;
2868 
2869 	inquiry_cache_flush(hdev);
2870 
2871 	memset(&cp, 0, sizeof(cp));
2872 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2873 	cp.length  = length;
2874 
2875 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2876 }
2877 
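/*
 * Editor's note: cp.lap above is the General Inquiry Access Code
 * 0x9E8B33 in little-endian byte order, and 'length' is the standard
 * HCI Inquiry_Length in units of 1.28 s (a length of 0x08 is roughly
 * 10.24 s of discovery).  A caller such as mgmt's start_discovery path
 * would, under that assumption, look roughly like:
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 *	if (err == -EINPROGRESS)
 *		... an inquiry is already running ...
 */
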
2878 int hci_cancel_inquiry(struct hci_dev *hdev)
2879 {
2880 	BT_DBG("%s", hdev->name);
2881 
2882 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2883 		return -EALREADY;
2884 
2885 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2886 }
2887 
2888 u8 bdaddr_to_le(u8 bdaddr_type)
2889 {
2890 	switch (bdaddr_type) {
2891 	case BDADDR_LE_PUBLIC:
2892 		return ADDR_LE_DEV_PUBLIC;
2893 
2894 	default:
2895 		/* Fall back to the LE Random address type */
2896 		return ADDR_LE_DEV_RANDOM;
2897 	}
2898 }
2899