xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 
31 #include <linux/rfkill.h>
32 
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35 
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39 
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43 
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47 
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50 
51 /* ---- HCI notifications ---- */
52 
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 	hci_sock_dev_event(hdev, event);
56 }
57 
58 /* ---- HCI requests ---- */
59 
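/* Command complete/status notification from the event handler. During
 * init a completion that does not match the last init command gets
 * special treatment; otherwise a pending synchronous request waiting
 * in __hci_request() is woken up with the result. */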
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62 	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63 
 64 	/* If this is the init phase, check whether the completed command
 65 	 * matches the last init command, and if not just return.
 66 	 */
67 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 		u16 opcode = __le16_to_cpu(sent->opcode);
70 		struct sk_buff *skb;
71 
 72 		/* Some CSR-based controllers generate a spontaneous
 73 		 * reset complete event during init, and any pending
 74 		 * command will never be completed. In such a case we
 75 		 * need to resend whatever command was sent
 76 		 * last.
 77 		 */
78 
79 		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 			return;
81 
82 		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 		if (skb) {
84 			skb_queue_head(&hdev->cmd_q, skb);
85 			queue_work(hdev->workqueue, &hdev->cmd_work);
86 		}
87 
88 		return;
89 	}
90 
91 	if (hdev->req_status == HCI_REQ_PEND) {
92 		hdev->req_result = result;
93 		hdev->req_status = HCI_REQ_DONE;
94 		wake_up_interruptible(&hdev->req_wait_q);
95 	}
96 }
97 
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
101 
102 	if (hdev->req_status == HCI_REQ_PEND) {
103 		hdev->req_result = err;
104 		hdev->req_status = HCI_REQ_CANCELED;
105 		wake_up_interruptible(&hdev->req_wait_q);
106 	}
107 }
108 
109 /* Execute request and wait for completion. */
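/* The req callback queues the HCI commands; the caller then sleeps on
 * req_wait_q until hci_req_complete() or hci_req_cancel() reports a
 * result or the timeout expires, and the result is mapped to an errno. */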
110 static int __hci_request(struct hci_dev *hdev,
111 			 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 			 unsigned long opt, __u32 timeout)
113 {
114 	DECLARE_WAITQUEUE(wait, current);
115 	int err = 0;
116 
117 	BT_DBG("%s start", hdev->name);
118 
119 	hdev->req_status = HCI_REQ_PEND;
120 
121 	add_wait_queue(&hdev->req_wait_q, &wait);
122 	set_current_state(TASK_INTERRUPTIBLE);
123 
124 	req(hdev, opt);
125 	schedule_timeout(timeout);
126 
127 	remove_wait_queue(&hdev->req_wait_q, &wait);
128 
129 	if (signal_pending(current))
130 		return -EINTR;
131 
132 	switch (hdev->req_status) {
133 	case HCI_REQ_DONE:
134 		err = -bt_to_errno(hdev->req_result);
135 		break;
136 
137 	case HCI_REQ_CANCELED:
138 		err = -hdev->req_result;
139 		break;
140 
141 	default:
142 		err = -ETIMEDOUT;
143 		break;
144 	}
145 
146 	hdev->req_status = hdev->req_result = 0;
147 
148 	BT_DBG("%s end: err %d", hdev->name, err);
149 
150 	return err;
151 }
152 
153 static int hci_request(struct hci_dev *hdev,
154 		       void (*req)(struct hci_dev *hdev, unsigned long opt),
155 		       unsigned long opt, __u32 timeout)
156 {
157 	int ret;
158 
159 	if (!test_bit(HCI_UP, &hdev->flags))
160 		return -ENETDOWN;
161 
162 	/* Serialize all requests */
163 	hci_req_lock(hdev);
164 	ret = __hci_request(hdev, req, opt, timeout);
165 	hci_req_unlock(hdev);
166 
167 	return ret;
168 }
169 
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171 {
172 	BT_DBG("%s %ld", hdev->name, opt);
173 
174 	/* Reset device */
175 	set_bit(HCI_RESET, &hdev->flags);
176 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 }
178 
179 static void bredr_init(struct hci_dev *hdev)
180 {
181 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182 
183 	/* Read Local Supported Features */
184 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
185 
186 	/* Read Local Version */
187 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
188 }
189 
190 static void amp_init(struct hci_dev *hdev)
191 {
192 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193 
194 	/* Read Local Version */
195 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196 
197 	/* Read Local AMP Info */
198 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
199 
200 	/* Read Data Blk size */
201 	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
202 }
203 
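/* Default init request run while bringing the device up: send any
 * driver-specific commands queued on driver_init, reset the controller
 * (unless the reset-on-close quirk is set) and then run the BR/EDR or
 * AMP specific initialization. */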
204 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
205 {
206 	struct sk_buff *skb;
207 
208 	BT_DBG("%s %ld", hdev->name, opt);
209 
210 	/* Driver initialization */
211 
212 	/* Special commands */
213 	while ((skb = skb_dequeue(&hdev->driver_init))) {
214 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 		skb->dev = (void *) hdev;
216 
217 		skb_queue_tail(&hdev->cmd_q, skb);
218 		queue_work(hdev->workqueue, &hdev->cmd_work);
219 	}
220 	skb_queue_purge(&hdev->driver_init);
221 
222 	/* Reset */
223 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 		hci_reset_req(hdev, 0);
225 
226 	switch (hdev->dev_type) {
227 	case HCI_BREDR:
228 		bredr_init(hdev);
229 		break;
230 
231 	case HCI_AMP:
232 		amp_init(hdev);
233 		break;
234 
235 	default:
236 		BT_ERR("Unknown device type %d", hdev->dev_type);
237 		break;
238 	}
239 }
240 
241 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242 {
243 	__u8 scan = opt;
244 
245 	BT_DBG("%s %x", hdev->name, scan);
246 
247 	/* Inquiry and Page scans */
248 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
249 }
250 
251 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252 {
253 	__u8 auth = opt;
254 
255 	BT_DBG("%s %x", hdev->name, auth);
256 
257 	/* Authentication */
258 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
259 }
260 
261 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262 {
263 	__u8 encrypt = opt;
264 
265 	BT_DBG("%s %x", hdev->name, encrypt);
266 
267 	/* Encryption */
268 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
269 }
270 
271 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272 {
273 	__le16 policy = cpu_to_le16(opt);
274 
275 	BT_DBG("%s %x", hdev->name, policy);
276 
277 	/* Default link policy */
278 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279 }
280 
281 /* Get HCI device by index.
282  * Device is held on return. */
283 struct hci_dev *hci_dev_get(int index)
284 {
285 	struct hci_dev *hdev = NULL, *d;
286 
287 	BT_DBG("%d", index);
288 
289 	if (index < 0)
290 		return NULL;
291 
292 	read_lock(&hci_dev_list_lock);
293 	list_for_each_entry(d, &hci_dev_list, list) {
294 		if (d->id == index) {
295 			hdev = hci_dev_hold(d);
296 			break;
297 		}
298 	}
299 	read_unlock(&hci_dev_list_lock);
300 	return hdev;
301 }
302 
303 /* ---- Inquiry support ---- */
304 
305 bool hci_discovery_active(struct hci_dev *hdev)
306 {
307 	struct discovery_state *discov = &hdev->discovery;
308 
309 	switch (discov->state) {
310 	case DISCOVERY_FINDING:
311 	case DISCOVERY_RESOLVING:
312 		return true;
313 
314 	default:
315 		return false;
316 	}
317 }
318 
319 void hci_discovery_set_state(struct hci_dev *hdev, int state)
320 {
321 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322 
323 	if (hdev->discovery.state == state)
324 		return;
325 
326 	switch (state) {
327 	case DISCOVERY_STOPPED:
328 		if (hdev->discovery.state != DISCOVERY_STARTING)
329 			mgmt_discovering(hdev, 0);
330 		break;
331 	case DISCOVERY_STARTING:
332 		break;
333 	case DISCOVERY_FINDING:
334 		mgmt_discovering(hdev, 1);
335 		break;
336 	case DISCOVERY_RESOLVING:
337 		break;
338 	case DISCOVERY_STOPPING:
339 		break;
340 	}
341 
342 	hdev->discovery.state = state;
343 }
344 
345 static void inquiry_cache_flush(struct hci_dev *hdev)
346 {
347 	struct discovery_state *cache = &hdev->discovery;
348 	struct inquiry_entry *p, *n;
349 
350 	list_for_each_entry_safe(p, n, &cache->all, all) {
351 		list_del(&p->all);
352 		kfree(p);
353 	}
354 
355 	INIT_LIST_HEAD(&cache->unknown);
356 	INIT_LIST_HEAD(&cache->resolve);
357 }
358 
359 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 					       bdaddr_t *bdaddr)
361 {
362 	struct discovery_state *cache = &hdev->discovery;
363 	struct inquiry_entry *e;
364 
365 	BT_DBG("cache %p, %pMR", cache, bdaddr);
366 
367 	list_for_each_entry(e, &cache->all, all) {
368 		if (!bacmp(&e->data.bdaddr, bdaddr))
369 			return e;
370 	}
371 
372 	return NULL;
373 }
374 
375 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
376 						       bdaddr_t *bdaddr)
377 {
378 	struct discovery_state *cache = &hdev->discovery;
379 	struct inquiry_entry *e;
380 
381 	BT_DBG("cache %p, %pMR", cache, bdaddr);
382 
383 	list_for_each_entry(e, &cache->unknown, list) {
384 		if (!bacmp(&e->data.bdaddr, bdaddr))
385 			return e;
386 	}
387 
388 	return NULL;
389 }
390 
391 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
392 						       bdaddr_t *bdaddr,
393 						       int state)
394 {
395 	struct discovery_state *cache = &hdev->discovery;
396 	struct inquiry_entry *e;
397 
398 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
399 
400 	list_for_each_entry(e, &cache->resolve, list) {
401 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 			return e;
403 		if (!bacmp(&e->data.bdaddr, bdaddr))
404 			return e;
405 	}
406 
407 	return NULL;
408 }
409 
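/* Re-insert @ie into the resolve list, keeping the list ordered by
 * signal strength (smallest absolute RSSI first) without moving it
 * ahead of entries whose name resolution is already pending. */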
410 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
411 				      struct inquiry_entry *ie)
412 {
413 	struct discovery_state *cache = &hdev->discovery;
414 	struct list_head *pos = &cache->resolve;
415 	struct inquiry_entry *p;
416 
417 	list_del(&ie->list);
418 
419 	list_for_each_entry(p, &cache->resolve, list) {
420 		if (p->name_state != NAME_PENDING &&
421 		    abs(p->data.rssi) >= abs(ie->data.rssi))
422 			break;
423 		pos = &p->list;
424 	}
425 
426 	list_add(&ie->list, pos);
427 }
428 
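/* Add or refresh the cache entry for @data. Returns false only when
 * the remote name is still marked as not known (the entry sits on the
 * unknown list), true otherwise; *ssp is set when the device indicates
 * Simple Pairing support. */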
429 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
430 			      bool name_known, bool *ssp)
431 {
432 	struct discovery_state *cache = &hdev->discovery;
433 	struct inquiry_entry *ie;
434 
435 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
436 
437 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
438 
439 	if (ssp)
440 		*ssp = data->ssp_mode;
441 
442 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
443 	if (ie) {
444 		if (ie->data.ssp_mode && ssp)
445 			*ssp = true;
446 
447 		if (ie->name_state == NAME_NEEDED &&
448 		    data->rssi != ie->data.rssi) {
449 			ie->data.rssi = data->rssi;
450 			hci_inquiry_cache_update_resolve(hdev, ie);
451 		}
452 
453 		goto update;
454 	}
455 
 456 	/* Entry not in the cache. Add a new one. */
457 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
458 	if (!ie)
459 		return false;
460 
461 	list_add(&ie->all, &cache->all);
462 
463 	if (name_known) {
464 		ie->name_state = NAME_KNOWN;
465 	} else {
466 		ie->name_state = NAME_NOT_KNOWN;
467 		list_add(&ie->list, &cache->unknown);
468 	}
469 
470 update:
471 	if (name_known && ie->name_state != NAME_KNOWN &&
472 	    ie->name_state != NAME_PENDING) {
473 		ie->name_state = NAME_KNOWN;
474 		list_del(&ie->list);
475 	}
476 
477 	memcpy(&ie->data, data, sizeof(*data));
478 	ie->timestamp = jiffies;
479 	cache->timestamp = jiffies;
480 
481 	if (ie->name_state == NAME_NOT_KNOWN)
482 		return false;
483 
484 	return true;
485 }
486 
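/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records and return the number of entries copied. */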
487 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
488 {
489 	struct discovery_state *cache = &hdev->discovery;
490 	struct inquiry_info *info = (struct inquiry_info *) buf;
491 	struct inquiry_entry *e;
492 	int copied = 0;
493 
494 	list_for_each_entry(e, &cache->all, all) {
495 		struct inquiry_data *data = &e->data;
496 
497 		if (copied >= num)
498 			break;
499 
500 		bacpy(&info->bdaddr, &data->bdaddr);
501 		info->pscan_rep_mode	= data->pscan_rep_mode;
502 		info->pscan_period_mode	= data->pscan_period_mode;
503 		info->pscan_mode	= data->pscan_mode;
504 		memcpy(info->dev_class, data->dev_class, 3);
505 		info->clock_offset	= data->clock_offset;
506 
507 		info++;
508 		copied++;
509 	}
510 
511 	BT_DBG("cache %p, copied %d", cache, copied);
512 	return copied;
513 }
514 
515 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
516 {
517 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
518 	struct hci_cp_inquiry cp;
519 
520 	BT_DBG("%s", hdev->name);
521 
522 	if (test_bit(HCI_INQUIRY, &hdev->flags))
523 		return;
524 
525 	/* Start Inquiry */
526 	memcpy(&cp.lap, &ir->lap, 3);
527 	cp.length  = ir->length;
528 	cp.num_rsp = ir->num_rsp;
529 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
530 }
531 
532 int hci_inquiry(void __user *arg)
533 {
534 	__u8 __user *ptr = arg;
535 	struct hci_inquiry_req ir;
536 	struct hci_dev *hdev;
537 	int err = 0, do_inquiry = 0, max_rsp;
538 	long timeo;
539 	__u8 *buf;
540 
541 	if (copy_from_user(&ir, ptr, sizeof(ir)))
542 		return -EFAULT;
543 
544 	hdev = hci_dev_get(ir.dev_id);
545 	if (!hdev)
546 		return -ENODEV;
547 
548 	hci_dev_lock(hdev);
549 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
550 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
551 		inquiry_cache_flush(hdev);
552 		do_inquiry = 1;
553 	}
554 	hci_dev_unlock(hdev);
555 
556 	timeo = ir.length * msecs_to_jiffies(2000);
557 
558 	if (do_inquiry) {
559 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
560 		if (err < 0)
561 			goto done;
562 	}
563 
 564 	/* For an unlimited number of responses we will use a buffer
 565 	 * with 255 entries
 566 	 */
567 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
568 
 569 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
 570 	 * and then copy it to user space.
 571 	 */
572 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
573 	if (!buf) {
574 		err = -ENOMEM;
575 		goto done;
576 	}
577 
578 	hci_dev_lock(hdev);
579 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
580 	hci_dev_unlock(hdev);
581 
582 	BT_DBG("num_rsp %d", ir.num_rsp);
583 
584 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
585 		ptr += sizeof(ir);
586 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
587 				 ir.num_rsp))
588 			err = -EFAULT;
589 	} else
590 		err = -EFAULT;
591 
592 	kfree(buf);
593 
594 done:
595 	hci_dev_put(hdev);
596 	return err;
597 }
598 
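/* Build the LE advertising data at @ptr: an EIR_FLAGS element, the
 * advertising TX power (when valid) and the local name, shortened if
 * necessary. Returns the number of bytes written. */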
599 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
600 {
601 	u8 ad_len = 0, flags = 0;
602 	size_t name_len;
603 
604 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
605 		flags |= LE_AD_GENERAL;
606 
607 	if (!lmp_bredr_capable(hdev))
608 		flags |= LE_AD_NO_BREDR;
609 
610 	if (lmp_le_br_capable(hdev))
611 		flags |= LE_AD_SIM_LE_BREDR_CTRL;
612 
613 	if (lmp_host_le_br_capable(hdev))
614 		flags |= LE_AD_SIM_LE_BREDR_HOST;
615 
616 	if (flags) {
617 		BT_DBG("adv flags 0x%02x", flags);
618 
619 		ptr[0] = 2;
620 		ptr[1] = EIR_FLAGS;
621 		ptr[2] = flags;
622 
623 		ad_len += 3;
624 		ptr += 3;
625 	}
626 
627 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
628 		ptr[0] = 2;
629 		ptr[1] = EIR_TX_POWER;
630 		ptr[2] = (u8) hdev->adv_tx_power;
631 
632 		ad_len += 3;
633 		ptr += 3;
634 	}
635 
636 	name_len = strlen(hdev->dev_name);
637 	if (name_len > 0) {
638 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
639 
640 		if (name_len > max_len) {
641 			name_len = max_len;
642 			ptr[1] = EIR_NAME_SHORT;
643 		} else
644 			ptr[1] = EIR_NAME_COMPLETE;
645 
646 		ptr[0] = name_len + 1;
647 
648 		memcpy(ptr + 2, hdev->dev_name, name_len);
649 
650 		ad_len += (name_len + 2);
651 		ptr += (name_len + 2);
652 	}
653 
654 	return ad_len;
655 }
656 
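/* Regenerate the advertising data and send it to the controller with
 * HCI_OP_LE_SET_ADV_DATA, but only if it differs from what was
 * programmed last time. */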
657 int hci_update_ad(struct hci_dev *hdev)
658 {
659 	struct hci_cp_le_set_adv_data cp;
660 	u8 len;
661 	int err;
662 
663 	hci_dev_lock(hdev);
664 
665 	if (!lmp_le_capable(hdev)) {
666 		err = -EINVAL;
667 		goto unlock;
668 	}
669 
670 	memset(&cp, 0, sizeof(cp));
671 
672 	len = create_ad(hdev, cp.data);
673 
674 	if (hdev->adv_data_len == len &&
675 	    memcmp(cp.data, hdev->adv_data, len) == 0) {
676 		err = 0;
677 		goto unlock;
678 	}
679 
680 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
681 	hdev->adv_data_len = len;
682 
683 	cp.length = len;
684 	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
685 
686 unlock:
687 	hci_dev_unlock(hdev);
688 
689 	return err;
690 }
691 
692 /* ---- HCI ioctl helpers ---- */
693 
694 int hci_dev_open(__u16 dev)
695 {
696 	struct hci_dev *hdev;
697 	int ret = 0;
698 
699 	hdev = hci_dev_get(dev);
700 	if (!hdev)
701 		return -ENODEV;
702 
703 	BT_DBG("%s %p", hdev->name, hdev);
704 
705 	hci_req_lock(hdev);
706 
707 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
708 		ret = -ENODEV;
709 		goto done;
710 	}
711 
712 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
713 		ret = -ERFKILL;
714 		goto done;
715 	}
716 
717 	if (test_bit(HCI_UP, &hdev->flags)) {
718 		ret = -EALREADY;
719 		goto done;
720 	}
721 
722 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
723 		set_bit(HCI_RAW, &hdev->flags);
724 
 725 	/* Treat all non-BR/EDR controllers as raw devices if
 726 	 * enable_hs is not set */
727 	if (hdev->dev_type != HCI_BREDR && !enable_hs)
728 		set_bit(HCI_RAW, &hdev->flags);
729 
730 	if (hdev->open(hdev)) {
731 		ret = -EIO;
732 		goto done;
733 	}
734 
735 	if (!test_bit(HCI_RAW, &hdev->flags)) {
736 		atomic_set(&hdev->cmd_cnt, 1);
737 		set_bit(HCI_INIT, &hdev->flags);
738 		hdev->init_last_cmd = 0;
739 
740 		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
741 
742 		clear_bit(HCI_INIT, &hdev->flags);
743 	}
744 
745 	if (!ret) {
746 		hci_dev_hold(hdev);
747 		set_bit(HCI_UP, &hdev->flags);
748 		hci_notify(hdev, HCI_DEV_UP);
749 		hci_update_ad(hdev);
750 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
751 		    mgmt_valid_hdev(hdev)) {
752 			hci_dev_lock(hdev);
753 			mgmt_powered(hdev, 1);
754 			hci_dev_unlock(hdev);
755 		}
756 	} else {
757 		/* Init failed, cleanup */
758 		flush_work(&hdev->tx_work);
759 		flush_work(&hdev->cmd_work);
760 		flush_work(&hdev->rx_work);
761 
762 		skb_queue_purge(&hdev->cmd_q);
763 		skb_queue_purge(&hdev->rx_q);
764 
765 		if (hdev->flush)
766 			hdev->flush(hdev);
767 
768 		if (hdev->sent_cmd) {
769 			kfree_skb(hdev->sent_cmd);
770 			hdev->sent_cmd = NULL;
771 		}
772 
773 		hdev->close(hdev);
774 		hdev->flags = 0;
775 	}
776 
777 done:
778 	hci_req_unlock(hdev);
779 	hci_dev_put(hdev);
780 	return ret;
781 }
782 
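/* Common power-down path used by hci_dev_close(), rfkill and device
 * unregistration: cancel pending work, flush the queues, optionally
 * reset the controller and tell the stack that the device is down. */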
783 static int hci_dev_do_close(struct hci_dev *hdev)
784 {
785 	BT_DBG("%s %p", hdev->name, hdev);
786 
787 	cancel_work_sync(&hdev->le_scan);
788 
789 	cancel_delayed_work(&hdev->power_off);
790 
791 	hci_req_cancel(hdev, ENODEV);
792 	hci_req_lock(hdev);
793 
794 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
795 		del_timer_sync(&hdev->cmd_timer);
796 		hci_req_unlock(hdev);
797 		return 0;
798 	}
799 
800 	/* Flush RX and TX works */
801 	flush_work(&hdev->tx_work);
802 	flush_work(&hdev->rx_work);
803 
804 	if (hdev->discov_timeout > 0) {
805 		cancel_delayed_work(&hdev->discov_off);
806 		hdev->discov_timeout = 0;
807 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
808 	}
809 
810 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
811 		cancel_delayed_work(&hdev->service_cache);
812 
813 	cancel_delayed_work_sync(&hdev->le_scan_disable);
814 
815 	hci_dev_lock(hdev);
816 	inquiry_cache_flush(hdev);
817 	hci_conn_hash_flush(hdev);
818 	hci_dev_unlock(hdev);
819 
820 	hci_notify(hdev, HCI_DEV_DOWN);
821 
822 	if (hdev->flush)
823 		hdev->flush(hdev);
824 
825 	/* Reset device */
826 	skb_queue_purge(&hdev->cmd_q);
827 	atomic_set(&hdev->cmd_cnt, 1);
828 	if (!test_bit(HCI_RAW, &hdev->flags) &&
829 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
830 		set_bit(HCI_INIT, &hdev->flags);
831 		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
832 		clear_bit(HCI_INIT, &hdev->flags);
833 	}
834 
 835 	/* Flush cmd work */
836 	flush_work(&hdev->cmd_work);
837 
838 	/* Drop queues */
839 	skb_queue_purge(&hdev->rx_q);
840 	skb_queue_purge(&hdev->cmd_q);
841 	skb_queue_purge(&hdev->raw_q);
842 
843 	/* Drop last sent command */
844 	if (hdev->sent_cmd) {
845 		del_timer_sync(&hdev->cmd_timer);
846 		kfree_skb(hdev->sent_cmd);
847 		hdev->sent_cmd = NULL;
848 	}
849 
850 	/* After this point our queues are empty
851 	 * and no tasks are scheduled. */
852 	hdev->close(hdev);
853 
854 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
855 	    mgmt_valid_hdev(hdev)) {
856 		hci_dev_lock(hdev);
857 		mgmt_powered(hdev, 0);
858 		hci_dev_unlock(hdev);
859 	}
860 
861 	/* Clear flags */
862 	hdev->flags = 0;
863 
864 	/* Controller radio is available but is currently powered down */
865 	hdev->amp_status = 0;
866 
867 	memset(hdev->eir, 0, sizeof(hdev->eir));
868 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
869 
870 	hci_req_unlock(hdev);
871 
872 	hci_dev_put(hdev);
873 	return 0;
874 }
875 
876 int hci_dev_close(__u16 dev)
877 {
878 	struct hci_dev *hdev;
879 	int err;
880 
881 	hdev = hci_dev_get(dev);
882 	if (!hdev)
883 		return -ENODEV;
884 
885 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
886 		cancel_delayed_work(&hdev->power_off);
887 
888 	err = hci_dev_do_close(hdev);
889 
890 	hci_dev_put(hdev);
891 	return err;
892 }
893 
894 int hci_dev_reset(__u16 dev)
895 {
896 	struct hci_dev *hdev;
897 	int ret = 0;
898 
899 	hdev = hci_dev_get(dev);
900 	if (!hdev)
901 		return -ENODEV;
902 
903 	hci_req_lock(hdev);
904 
905 	if (!test_bit(HCI_UP, &hdev->flags))
906 		goto done;
907 
908 	/* Drop queues */
909 	skb_queue_purge(&hdev->rx_q);
910 	skb_queue_purge(&hdev->cmd_q);
911 
912 	hci_dev_lock(hdev);
913 	inquiry_cache_flush(hdev);
914 	hci_conn_hash_flush(hdev);
915 	hci_dev_unlock(hdev);
916 
917 	if (hdev->flush)
918 		hdev->flush(hdev);
919 
920 	atomic_set(&hdev->cmd_cnt, 1);
921 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
922 
923 	if (!test_bit(HCI_RAW, &hdev->flags))
924 		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
925 
926 done:
927 	hci_req_unlock(hdev);
928 	hci_dev_put(hdev);
929 	return ret;
930 }
931 
932 int hci_dev_reset_stat(__u16 dev)
933 {
934 	struct hci_dev *hdev;
935 	int ret = 0;
936 
937 	hdev = hci_dev_get(dev);
938 	if (!hdev)
939 		return -ENODEV;
940 
941 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
942 
943 	hci_dev_put(hdev);
944 
945 	return ret;
946 }
947 
948 int hci_dev_cmd(unsigned int cmd, void __user *arg)
949 {
950 	struct hci_dev *hdev;
951 	struct hci_dev_req dr;
952 	int err = 0;
953 
954 	if (copy_from_user(&dr, arg, sizeof(dr)))
955 		return -EFAULT;
956 
957 	hdev = hci_dev_get(dr.dev_id);
958 	if (!hdev)
959 		return -ENODEV;
960 
961 	switch (cmd) {
962 	case HCISETAUTH:
963 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
964 				  HCI_INIT_TIMEOUT);
965 		break;
966 
967 	case HCISETENCRYPT:
968 		if (!lmp_encrypt_capable(hdev)) {
969 			err = -EOPNOTSUPP;
970 			break;
971 		}
972 
973 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
974 			/* Auth must be enabled first */
975 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
976 					  HCI_INIT_TIMEOUT);
977 			if (err)
978 				break;
979 		}
980 
981 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
982 				  HCI_INIT_TIMEOUT);
983 		break;
984 
985 	case HCISETSCAN:
986 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
987 				  HCI_INIT_TIMEOUT);
988 		break;
989 
990 	case HCISETLINKPOL:
991 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
992 				  HCI_INIT_TIMEOUT);
993 		break;
994 
995 	case HCISETLINKMODE:
996 		hdev->link_mode = ((__u16) dr.dev_opt) &
997 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
998 		break;
999 
1000 	case HCISETPTYPE:
1001 		hdev->pkt_type = (__u16) dr.dev_opt;
1002 		break;
1003 
1004 	case HCISETACLMTU:
1005 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1006 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1007 		break;
1008 
1009 	case HCISETSCOMTU:
1010 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1011 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1012 		break;
1013 
1014 	default:
1015 		err = -EINVAL;
1016 		break;
1017 	}
1018 
1019 	hci_dev_put(hdev);
1020 	return err;
1021 }
1022 
1023 int hci_get_dev_list(void __user *arg)
1024 {
1025 	struct hci_dev *hdev;
1026 	struct hci_dev_list_req *dl;
1027 	struct hci_dev_req *dr;
1028 	int n = 0, size, err;
1029 	__u16 dev_num;
1030 
1031 	if (get_user(dev_num, (__u16 __user *) arg))
1032 		return -EFAULT;
1033 
1034 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1035 		return -EINVAL;
1036 
1037 	size = sizeof(*dl) + dev_num * sizeof(*dr);
1038 
1039 	dl = kzalloc(size, GFP_KERNEL);
1040 	if (!dl)
1041 		return -ENOMEM;
1042 
1043 	dr = dl->dev_req;
1044 
1045 	read_lock(&hci_dev_list_lock);
1046 	list_for_each_entry(hdev, &hci_dev_list, list) {
1047 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1048 			cancel_delayed_work(&hdev->power_off);
1049 
1050 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1051 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1052 
1053 		(dr + n)->dev_id  = hdev->id;
1054 		(dr + n)->dev_opt = hdev->flags;
1055 
1056 		if (++n >= dev_num)
1057 			break;
1058 	}
1059 	read_unlock(&hci_dev_list_lock);
1060 
1061 	dl->dev_num = n;
1062 	size = sizeof(*dl) + n * sizeof(*dr);
1063 
1064 	err = copy_to_user(arg, dl, size);
1065 	kfree(dl);
1066 
1067 	return err ? -EFAULT : 0;
1068 }
1069 
1070 int hci_get_dev_info(void __user *arg)
1071 {
1072 	struct hci_dev *hdev;
1073 	struct hci_dev_info di;
1074 	int err = 0;
1075 
1076 	if (copy_from_user(&di, arg, sizeof(di)))
1077 		return -EFAULT;
1078 
1079 	hdev = hci_dev_get(di.dev_id);
1080 	if (!hdev)
1081 		return -ENODEV;
1082 
1083 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1084 		cancel_delayed_work_sync(&hdev->power_off);
1085 
1086 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1087 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1088 
1089 	strcpy(di.name, hdev->name);
1090 	di.bdaddr   = hdev->bdaddr;
1091 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1092 	di.flags    = hdev->flags;
1093 	di.pkt_type = hdev->pkt_type;
1094 	if (lmp_bredr_capable(hdev)) {
1095 		di.acl_mtu  = hdev->acl_mtu;
1096 		di.acl_pkts = hdev->acl_pkts;
1097 		di.sco_mtu  = hdev->sco_mtu;
1098 		di.sco_pkts = hdev->sco_pkts;
1099 	} else {
1100 		di.acl_mtu  = hdev->le_mtu;
1101 		di.acl_pkts = hdev->le_pkts;
1102 		di.sco_mtu  = 0;
1103 		di.sco_pkts = 0;
1104 	}
1105 	di.link_policy = hdev->link_policy;
1106 	di.link_mode   = hdev->link_mode;
1107 
1108 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1109 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1110 
1111 	if (copy_to_user(arg, &di, sizeof(di)))
1112 		err = -EFAULT;
1113 
1114 	hci_dev_put(hdev);
1115 
1116 	return err;
1117 }
1118 
1119 /* ---- Interface to HCI drivers ---- */
1120 
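/* rfkill callback: a blocked radio forces the device down; nothing is
 * done here when the radio is unblocked again. */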
1121 static int hci_rfkill_set_block(void *data, bool blocked)
1122 {
1123 	struct hci_dev *hdev = data;
1124 
1125 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1126 
1127 	if (!blocked)
1128 		return 0;
1129 
1130 	hci_dev_do_close(hdev);
1131 
1132 	return 0;
1133 }
1134 
1135 static const struct rfkill_ops hci_rfkill_ops = {
1136 	.set_block = hci_rfkill_set_block,
1137 };
1138 
1139 static void hci_power_on(struct work_struct *work)
1140 {
1141 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1142 
1143 	BT_DBG("%s", hdev->name);
1144 
1145 	if (hci_dev_open(hdev->id) < 0)
1146 		return;
1147 
1148 	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1149 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1150 				   HCI_AUTO_OFF_TIMEOUT);
1151 
1152 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1153 		mgmt_index_added(hdev);
1154 }
1155 
1156 static void hci_power_off(struct work_struct *work)
1157 {
1158 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1159 					    power_off.work);
1160 
1161 	BT_DBG("%s", hdev->name);
1162 
1163 	hci_dev_do_close(hdev);
1164 }
1165 
1166 static void hci_discov_off(struct work_struct *work)
1167 {
1168 	struct hci_dev *hdev;
1169 	u8 scan = SCAN_PAGE;
1170 
1171 	hdev = container_of(work, struct hci_dev, discov_off.work);
1172 
1173 	BT_DBG("%s", hdev->name);
1174 
1175 	hci_dev_lock(hdev);
1176 
1177 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1178 
1179 	hdev->discov_timeout = 0;
1180 
1181 	hci_dev_unlock(hdev);
1182 }
1183 
1184 int hci_uuids_clear(struct hci_dev *hdev)
1185 {
1186 	struct bt_uuid *uuid, *tmp;
1187 
1188 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1189 		list_del(&uuid->list);
1190 		kfree(uuid);
1191 	}
1192 
1193 	return 0;
1194 }
1195 
1196 int hci_link_keys_clear(struct hci_dev *hdev)
1197 {
1198 	struct list_head *p, *n;
1199 
1200 	list_for_each_safe(p, n, &hdev->link_keys) {
1201 		struct link_key *key;
1202 
1203 		key = list_entry(p, struct link_key, list);
1204 
1205 		list_del(p);
1206 		kfree(key);
1207 	}
1208 
1209 	return 0;
1210 }
1211 
1212 int hci_smp_ltks_clear(struct hci_dev *hdev)
1213 {
1214 	struct smp_ltk *k, *tmp;
1215 
1216 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1217 		list_del(&k->list);
1218 		kfree(k);
1219 	}
1220 
1221 	return 0;
1222 }
1223 
1224 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1225 {
1226 	struct link_key *k;
1227 
1228 	list_for_each_entry(k, &hdev->link_keys, list)
1229 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1230 			return k;
1231 
1232 	return NULL;
1233 }
1234 
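/* Decide whether a new link key should be stored permanently, based on
 * the key type and on the bonding requirements of the local and remote
 * side. */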
1235 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1236 			       u8 key_type, u8 old_key_type)
1237 {
1238 	/* Legacy key */
1239 	if (key_type < 0x03)
1240 		return true;
1241 
1242 	/* Debug keys are insecure so don't store them persistently */
1243 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1244 		return false;
1245 
1246 	/* Changed combination key and there's no previous one */
1247 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1248 		return false;
1249 
1250 	/* Security mode 3 case */
1251 	if (!conn)
1252 		return true;
1253 
1254 	/* Neither the local nor the remote side had no-bonding as a requirement */
1255 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1256 		return true;
1257 
1258 	/* Local side had dedicated bonding as a requirement */
1259 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1260 		return true;
1261 
1262 	/* Remote side had dedicated bonding as a requirement */
1263 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1264 		return true;
1265 
1266 	/* If none of the above criteria match, then don't store the key
1267 	 * persistently */
1268 	return false;
1269 }
1270 
1271 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1272 {
1273 	struct smp_ltk *k;
1274 
1275 	list_for_each_entry(k, &hdev->long_term_keys, list) {
1276 		if (k->ediv != ediv ||
1277 		    memcmp(rand, k->rand, sizeof(k->rand)))
1278 			continue;
1279 
1280 		return k;
1281 	}
1282 
1283 	return NULL;
1284 }
1285 
1286 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1287 				     u8 addr_type)
1288 {
1289 	struct smp_ltk *k;
1290 
1291 	list_for_each_entry(k, &hdev->long_term_keys, list)
1292 		if (addr_type == k->bdaddr_type &&
1293 		    bacmp(bdaddr, &k->bdaddr) == 0)
1294 			return k;
1295 
1296 	return NULL;
1297 }
1298 
1299 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1300 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1301 {
1302 	struct link_key *key, *old_key;
1303 	u8 old_key_type;
1304 	bool persistent;
1305 
1306 	old_key = hci_find_link_key(hdev, bdaddr);
1307 	if (old_key) {
1308 		old_key_type = old_key->type;
1309 		key = old_key;
1310 	} else {
1311 		old_key_type = conn ? conn->key_type : 0xff;
1312 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1313 		if (!key)
1314 			return -ENOMEM;
1315 		list_add(&key->list, &hdev->link_keys);
1316 	}
1317 
1318 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1319 
1320 	/* Some buggy controller combinations generate a changed
1321 	 * combination key for legacy pairing even when there's no
1322 	 * previous key */
1323 	if (type == HCI_LK_CHANGED_COMBINATION &&
1324 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1325 		type = HCI_LK_COMBINATION;
1326 		if (conn)
1327 			conn->key_type = type;
1328 	}
1329 
1330 	bacpy(&key->bdaddr, bdaddr);
1331 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1332 	key->pin_len = pin_len;
1333 
1334 	if (type == HCI_LK_CHANGED_COMBINATION)
1335 		key->type = old_key_type;
1336 	else
1337 		key->type = type;
1338 
1339 	if (!new_key)
1340 		return 0;
1341 
1342 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1343 
1344 	mgmt_new_link_key(hdev, key, persistent);
1345 
1346 	if (conn)
1347 		conn->flush_key = !persistent;
1348 
1349 	return 0;
1350 }
1351 
1352 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1353 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1354 		ediv, u8 rand[8])
1355 {
1356 	struct smp_ltk *key, *old_key;
1357 
1358 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1359 		return 0;
1360 
1361 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1362 	if (old_key)
1363 		key = old_key;
1364 	else {
1365 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1366 		if (!key)
1367 			return -ENOMEM;
1368 		list_add(&key->list, &hdev->long_term_keys);
1369 	}
1370 
1371 	bacpy(&key->bdaddr, bdaddr);
1372 	key->bdaddr_type = addr_type;
1373 	memcpy(key->val, tk, sizeof(key->val));
1374 	key->authenticated = authenticated;
1375 	key->ediv = ediv;
1376 	key->enc_size = enc_size;
1377 	key->type = type;
1378 	memcpy(key->rand, rand, sizeof(key->rand));
1379 
1380 	if (!new_key)
1381 		return 0;
1382 
1383 	if (type & HCI_SMP_LTK)
1384 		mgmt_new_ltk(hdev, key, 1);
1385 
1386 	return 0;
1387 }
1388 
1389 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1390 {
1391 	struct link_key *key;
1392 
1393 	key = hci_find_link_key(hdev, bdaddr);
1394 	if (!key)
1395 		return -ENOENT;
1396 
1397 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1398 
1399 	list_del(&key->list);
1400 	kfree(key);
1401 
1402 	return 0;
1403 }
1404 
1405 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1406 {
1407 	struct smp_ltk *k, *tmp;
1408 
1409 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1410 		if (bacmp(bdaddr, &k->bdaddr))
1411 			continue;
1412 
1413 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1414 
1415 		list_del(&k->list);
1416 		kfree(k);
1417 	}
1418 
1419 	return 0;
1420 }
1421 
1422 /* HCI command timer function */
1423 static void hci_cmd_timeout(unsigned long arg)
1424 {
1425 	struct hci_dev *hdev = (void *) arg;
1426 
1427 	if (hdev->sent_cmd) {
1428 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1429 		u16 opcode = __le16_to_cpu(sent->opcode);
1430 
1431 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1432 	} else {
1433 		BT_ERR("%s command tx timeout", hdev->name);
1434 	}
1435 
1436 	atomic_set(&hdev->cmd_cnt, 1);
1437 	queue_work(hdev->workqueue, &hdev->cmd_work);
1438 }
1439 
1440 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1441 					  bdaddr_t *bdaddr)
1442 {
1443 	struct oob_data *data;
1444 
1445 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1446 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1447 			return data;
1448 
1449 	return NULL;
1450 }
1451 
1452 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1453 {
1454 	struct oob_data *data;
1455 
1456 	data = hci_find_remote_oob_data(hdev, bdaddr);
1457 	if (!data)
1458 		return -ENOENT;
1459 
1460 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1461 
1462 	list_del(&data->list);
1463 	kfree(data);
1464 
1465 	return 0;
1466 }
1467 
1468 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1469 {
1470 	struct oob_data *data, *n;
1471 
1472 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1473 		list_del(&data->list);
1474 		kfree(data);
1475 	}
1476 
1477 	return 0;
1478 }
1479 
1480 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1481 			    u8 *randomizer)
1482 {
1483 	struct oob_data *data;
1484 
1485 	data = hci_find_remote_oob_data(hdev, bdaddr);
1486 
1487 	if (!data) {
1488 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1489 		if (!data)
1490 			return -ENOMEM;
1491 
1492 		bacpy(&data->bdaddr, bdaddr);
1493 		list_add(&data->list, &hdev->remote_oob_data);
1494 	}
1495 
1496 	memcpy(data->hash, hash, sizeof(data->hash));
1497 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1498 
1499 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1500 
1501 	return 0;
1502 }
1503 
1504 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505 {
1506 	struct bdaddr_list *b;
1507 
1508 	list_for_each_entry(b, &hdev->blacklist, list)
1509 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1510 			return b;
1511 
1512 	return NULL;
1513 }
1514 
1515 int hci_blacklist_clear(struct hci_dev *hdev)
1516 {
1517 	struct list_head *p, *n;
1518 
1519 	list_for_each_safe(p, n, &hdev->blacklist) {
1520 		struct bdaddr_list *b;
1521 
1522 		b = list_entry(p, struct bdaddr_list, list);
1523 
1524 		list_del(p);
1525 		kfree(b);
1526 	}
1527 
1528 	return 0;
1529 }
1530 
1531 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1532 {
1533 	struct bdaddr_list *entry;
1534 
1535 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1536 		return -EBADF;
1537 
1538 	if (hci_blacklist_lookup(hdev, bdaddr))
1539 		return -EEXIST;
1540 
1541 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1542 	if (!entry)
1543 		return -ENOMEM;
1544 
1545 	bacpy(&entry->bdaddr, bdaddr);
1546 
1547 	list_add(&entry->list, &hdev->blacklist);
1548 
1549 	return mgmt_device_blocked(hdev, bdaddr, type);
1550 }
1551 
1552 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1553 {
1554 	struct bdaddr_list *entry;
1555 
1556 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1557 		return hci_blacklist_clear(hdev);
1558 
1559 	entry = hci_blacklist_lookup(hdev, bdaddr);
1560 	if (!entry)
1561 		return -ENOENT;
1562 
1563 	list_del(&entry->list);
1564 	kfree(entry);
1565 
1566 	return mgmt_device_unblocked(hdev, bdaddr, type);
1567 }
1568 
1569 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1570 {
1571 	struct le_scan_params *param =  (struct le_scan_params *) opt;
1572 	struct hci_cp_le_set_scan_param cp;
1573 
1574 	memset(&cp, 0, sizeof(cp));
1575 	cp.type = param->type;
1576 	cp.interval = cpu_to_le16(param->interval);
1577 	cp.window = cpu_to_le16(param->window);
1578 
1579 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1580 }
1581 
1582 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1583 {
1584 	struct hci_cp_le_set_scan_enable cp;
1585 
1586 	memset(&cp, 0, sizeof(cp));
1587 	cp.enable = 1;
1588 	cp.filter_dup = 1;
1589 
1590 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1591 }
1592 
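/* Program the scan parameters, enable LE scanning and schedule the
 * le_scan_disable work to stop the scan again after @timeout
 * milliseconds. */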
1593 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1594 			  u16 window, int timeout)
1595 {
1596 	long timeo = msecs_to_jiffies(3000);
1597 	struct le_scan_params param;
1598 	int err;
1599 
1600 	BT_DBG("%s", hdev->name);
1601 
1602 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1603 		return -EINPROGRESS;
1604 
1605 	param.type = type;
1606 	param.interval = interval;
1607 	param.window = window;
1608 
1609 	hci_req_lock(hdev);
1610 
1611 	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1612 			    timeo);
1613 	if (!err)
1614 		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1615 
1616 	hci_req_unlock(hdev);
1617 
1618 	if (err < 0)
1619 		return err;
1620 
1621 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1622 			   msecs_to_jiffies(timeout));
1623 
1624 	return 0;
1625 }
1626 
1627 int hci_cancel_le_scan(struct hci_dev *hdev)
1628 {
1629 	BT_DBG("%s", hdev->name);
1630 
1631 	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1632 		return -EALREADY;
1633 
1634 	if (cancel_delayed_work(&hdev->le_scan_disable)) {
1635 		struct hci_cp_le_set_scan_enable cp;
1636 
1637 		/* Send HCI command to disable LE Scan */
1638 		memset(&cp, 0, sizeof(cp));
1639 		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 static void le_scan_disable_work(struct work_struct *work)
1646 {
1647 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1648 					    le_scan_disable.work);
1649 	struct hci_cp_le_set_scan_enable cp;
1650 
1651 	BT_DBG("%s", hdev->name);
1652 
1653 	memset(&cp, 0, sizeof(cp));
1654 
1655 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1656 }
1657 
1658 static void le_scan_work(struct work_struct *work)
1659 {
1660 	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1661 	struct le_scan_params *param = &hdev->le_scan_params;
1662 
1663 	BT_DBG("%s", hdev->name);
1664 
1665 	hci_do_le_scan(hdev, param->type, param->interval, param->window,
1666 		       param->timeout);
1667 }
1668 
1669 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1670 		int timeout)
1671 {
1672 	struct le_scan_params *param = &hdev->le_scan_params;
1673 
1674 	BT_DBG("%s", hdev->name);
1675 
1676 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1677 		return -ENOTSUPP;
1678 
1679 	if (work_busy(&hdev->le_scan))
1680 		return -EINPROGRESS;
1681 
1682 	param->type = type;
1683 	param->interval = interval;
1684 	param->window = window;
1685 	param->timeout = timeout;
1686 
1687 	queue_work(system_long_wq, &hdev->le_scan);
1688 
1689 	return 0;
1690 }
1691 
1692 /* Alloc HCI device */
1693 struct hci_dev *hci_alloc_dev(void)
1694 {
1695 	struct hci_dev *hdev;
1696 
1697 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1698 	if (!hdev)
1699 		return NULL;
1700 
1701 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1702 	hdev->esco_type = (ESCO_HV1);
1703 	hdev->link_mode = (HCI_LM_ACCEPT);
1704 	hdev->io_capability = 0x03; /* No Input No Output */
1705 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1706 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1707 
1708 	hdev->sniff_max_interval = 800;
1709 	hdev->sniff_min_interval = 80;
1710 
1711 	mutex_init(&hdev->lock);
1712 	mutex_init(&hdev->req_lock);
1713 
1714 	INIT_LIST_HEAD(&hdev->mgmt_pending);
1715 	INIT_LIST_HEAD(&hdev->blacklist);
1716 	INIT_LIST_HEAD(&hdev->uuids);
1717 	INIT_LIST_HEAD(&hdev->link_keys);
1718 	INIT_LIST_HEAD(&hdev->long_term_keys);
1719 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1720 	INIT_LIST_HEAD(&hdev->conn_hash.list);
1721 
1722 	INIT_WORK(&hdev->rx_work, hci_rx_work);
1723 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1724 	INIT_WORK(&hdev->tx_work, hci_tx_work);
1725 	INIT_WORK(&hdev->power_on, hci_power_on);
1726 	INIT_WORK(&hdev->le_scan, le_scan_work);
1727 
1728 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1729 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1730 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1731 
1732 	skb_queue_head_init(&hdev->driver_init);
1733 	skb_queue_head_init(&hdev->rx_q);
1734 	skb_queue_head_init(&hdev->cmd_q);
1735 	skb_queue_head_init(&hdev->raw_q);
1736 
1737 	init_waitqueue_head(&hdev->req_wait_q);
1738 
1739 	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1740 
1741 	hci_init_sysfs(hdev);
1742 	discovery_init(hdev);
1743 
1744 	return hdev;
1745 }
1746 EXPORT_SYMBOL(hci_alloc_dev);
1747 
1748 /* Free HCI device */
1749 void hci_free_dev(struct hci_dev *hdev)
1750 {
1751 	skb_queue_purge(&hdev->driver_init);
1752 
1753 	/* Will be freed via the device release callback */
1754 	put_device(&hdev->dev);
1755 }
1756 EXPORT_SYMBOL(hci_free_dev);
1757 
1758 /* Register HCI device */
1759 int hci_register_dev(struct hci_dev *hdev)
1760 {
1761 	int id, error;
1762 
1763 	if (!hdev->open || !hdev->close)
1764 		return -EINVAL;
1765 
1766 	/* Do not allow HCI_AMP devices to register at index 0,
1767 	 * so the index can be used as the AMP controller ID.
1768 	 */
1769 	switch (hdev->dev_type) {
1770 	case HCI_BREDR:
1771 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1772 		break;
1773 	case HCI_AMP:
1774 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1775 		break;
1776 	default:
1777 		return -EINVAL;
1778 	}
1779 
1780 	if (id < 0)
1781 		return id;
1782 
1783 	sprintf(hdev->name, "hci%d", id);
1784 	hdev->id = id;
1785 
1786 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1787 
1788 	write_lock(&hci_dev_list_lock);
1789 	list_add(&hdev->list, &hci_dev_list);
1790 	write_unlock(&hci_dev_list_lock);
1791 
1792 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1793 					  WQ_MEM_RECLAIM, 1);
1794 	if (!hdev->workqueue) {
1795 		error = -ENOMEM;
1796 		goto err;
1797 	}
1798 
1799 	hdev->req_workqueue = alloc_workqueue(hdev->name,
1800 					      WQ_HIGHPRI | WQ_UNBOUND |
1801 					      WQ_MEM_RECLAIM, 1);
1802 	if (!hdev->req_workqueue) {
1803 		destroy_workqueue(hdev->workqueue);
1804 		error = -ENOMEM;
1805 		goto err;
1806 	}
1807 
1808 	error = hci_add_sysfs(hdev);
1809 	if (error < 0)
1810 		goto err_wqueue;
1811 
1812 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1813 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1814 				    hdev);
1815 	if (hdev->rfkill) {
1816 		if (rfkill_register(hdev->rfkill) < 0) {
1817 			rfkill_destroy(hdev->rfkill);
1818 			hdev->rfkill = NULL;
1819 		}
1820 	}
1821 
1822 	set_bit(HCI_SETUP, &hdev->dev_flags);
1823 
1824 	if (hdev->dev_type != HCI_AMP)
1825 		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1826 
1827 	hci_notify(hdev, HCI_DEV_REG);
1828 	hci_dev_hold(hdev);
1829 
1830 	queue_work(hdev->req_workqueue, &hdev->power_on);
1831 
1832 	return id;
1833 
1834 err_wqueue:
1835 	destroy_workqueue(hdev->workqueue);
1836 	destroy_workqueue(hdev->req_workqueue);
1837 err:
1838 	ida_simple_remove(&hci_index_ida, hdev->id);
1839 	write_lock(&hci_dev_list_lock);
1840 	list_del(&hdev->list);
1841 	write_unlock(&hci_dev_list_lock);
1842 
1843 	return error;
1844 }
1845 EXPORT_SYMBOL(hci_register_dev);
1846 
1847 /* Unregister HCI device */
1848 void hci_unregister_dev(struct hci_dev *hdev)
1849 {
1850 	int i, id;
1851 
1852 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1853 
1854 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1855 
1856 	id = hdev->id;
1857 
1858 	write_lock(&hci_dev_list_lock);
1859 	list_del(&hdev->list);
1860 	write_unlock(&hci_dev_list_lock);
1861 
1862 	hci_dev_do_close(hdev);
1863 
1864 	for (i = 0; i < NUM_REASSEMBLY; i++)
1865 		kfree_skb(hdev->reassembly[i]);
1866 
1867 	cancel_work_sync(&hdev->power_on);
1868 
1869 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1870 	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1871 		hci_dev_lock(hdev);
1872 		mgmt_index_removed(hdev);
1873 		hci_dev_unlock(hdev);
1874 	}
1875 
1876 	/* mgmt_index_removed should take care of emptying the
1877 	 * pending list */
1878 	BUG_ON(!list_empty(&hdev->mgmt_pending));
1879 
1880 	hci_notify(hdev, HCI_DEV_UNREG);
1881 
1882 	if (hdev->rfkill) {
1883 		rfkill_unregister(hdev->rfkill);
1884 		rfkill_destroy(hdev->rfkill);
1885 	}
1886 
1887 	hci_del_sysfs(hdev);
1888 
1889 	destroy_workqueue(hdev->workqueue);
1890 	destroy_workqueue(hdev->req_workqueue);
1891 
1892 	hci_dev_lock(hdev);
1893 	hci_blacklist_clear(hdev);
1894 	hci_uuids_clear(hdev);
1895 	hci_link_keys_clear(hdev);
1896 	hci_smp_ltks_clear(hdev);
1897 	hci_remote_oob_data_clear(hdev);
1898 	hci_dev_unlock(hdev);
1899 
1900 	hci_dev_put(hdev);
1901 
1902 	ida_simple_remove(&hci_index_ida, id);
1903 }
1904 EXPORT_SYMBOL(hci_unregister_dev);
1905 
1906 /* Suspend HCI device */
1907 int hci_suspend_dev(struct hci_dev *hdev)
1908 {
1909 	hci_notify(hdev, HCI_DEV_SUSPEND);
1910 	return 0;
1911 }
1912 EXPORT_SYMBOL(hci_suspend_dev);
1913 
1914 /* Resume HCI device */
1915 int hci_resume_dev(struct hci_dev *hdev)
1916 {
1917 	hci_notify(hdev, HCI_DEV_RESUME);
1918 	return 0;
1919 }
1920 EXPORT_SYMBOL(hci_resume_dev);
1921 
1922 /* Receive frame from HCI drivers */
1923 int hci_recv_frame(struct sk_buff *skb)
1924 {
1925 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1926 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1927 		      && !test_bit(HCI_INIT, &hdev->flags))) {
1928 		kfree_skb(skb);
1929 		return -ENXIO;
1930 	}
1931 
1932 	/* Incoming skb */
1933 	bt_cb(skb)->incoming = 1;
1934 
1935 	/* Time stamp */
1936 	__net_timestamp(skb);
1937 
1938 	skb_queue_tail(&hdev->rx_q, skb);
1939 	queue_work(hdev->workqueue, &hdev->rx_work);
1940 
1941 	return 0;
1942 }
1943 EXPORT_SYMBOL(hci_recv_frame);
1944 
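/* Reassemble @count bytes of @data into the packet buffer at @index:
 * allocate an skb on the first fragment, derive the expected length
 * from the packet header and hand the skb to hci_recv_frame() once it
 * is complete. Returns the number of unconsumed bytes or a negative
 * error. */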
1945 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1946 			  int count, __u8 index)
1947 {
1948 	int len = 0;
1949 	int hlen = 0;
1950 	int remain = count;
1951 	struct sk_buff *skb;
1952 	struct bt_skb_cb *scb;
1953 
1954 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1955 	    index >= NUM_REASSEMBLY)
1956 		return -EILSEQ;
1957 
1958 	skb = hdev->reassembly[index];
1959 
1960 	if (!skb) {
1961 		switch (type) {
1962 		case HCI_ACLDATA_PKT:
1963 			len = HCI_MAX_FRAME_SIZE;
1964 			hlen = HCI_ACL_HDR_SIZE;
1965 			break;
1966 		case HCI_EVENT_PKT:
1967 			len = HCI_MAX_EVENT_SIZE;
1968 			hlen = HCI_EVENT_HDR_SIZE;
1969 			break;
1970 		case HCI_SCODATA_PKT:
1971 			len = HCI_MAX_SCO_SIZE;
1972 			hlen = HCI_SCO_HDR_SIZE;
1973 			break;
1974 		}
1975 
1976 		skb = bt_skb_alloc(len, GFP_ATOMIC);
1977 		if (!skb)
1978 			return -ENOMEM;
1979 
1980 		scb = (void *) skb->cb;
1981 		scb->expect = hlen;
1982 		scb->pkt_type = type;
1983 
1984 		skb->dev = (void *) hdev;
1985 		hdev->reassembly[index] = skb;
1986 	}
1987 
1988 	while (count) {
1989 		scb = (void *) skb->cb;
1990 		len = min_t(uint, scb->expect, count);
1991 
1992 		memcpy(skb_put(skb, len), data, len);
1993 
1994 		count -= len;
1995 		data += len;
1996 		scb->expect -= len;
1997 		remain = count;
1998 
1999 		switch (type) {
2000 		case HCI_EVENT_PKT:
2001 			if (skb->len == HCI_EVENT_HDR_SIZE) {
2002 				struct hci_event_hdr *h = hci_event_hdr(skb);
2003 				scb->expect = h->plen;
2004 
2005 				if (skb_tailroom(skb) < scb->expect) {
2006 					kfree_skb(skb);
2007 					hdev->reassembly[index] = NULL;
2008 					return -ENOMEM;
2009 				}
2010 			}
2011 			break;
2012 
2013 		case HCI_ACLDATA_PKT:
2014 			if (skb->len  == HCI_ACL_HDR_SIZE) {
2015 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
2016 				scb->expect = __le16_to_cpu(h->dlen);
2017 
2018 				if (skb_tailroom(skb) < scb->expect) {
2019 					kfree_skb(skb);
2020 					hdev->reassembly[index] = NULL;
2021 					return -ENOMEM;
2022 				}
2023 			}
2024 			break;
2025 
2026 		case HCI_SCODATA_PKT:
2027 			if (skb->len == HCI_SCO_HDR_SIZE) {
2028 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
2029 				scb->expect = h->dlen;
2030 
2031 				if (skb_tailroom(skb) < scb->expect) {
2032 					kfree_skb(skb);
2033 					hdev->reassembly[index] = NULL;
2034 					return -ENOMEM;
2035 				}
2036 			}
2037 			break;
2038 		}
2039 
2040 		if (scb->expect == 0) {
2041 			/* Complete frame */
2042 
2043 			bt_cb(skb)->pkt_type = type;
2044 			hci_recv_frame(skb);
2045 
2046 			hdev->reassembly[index] = NULL;
2047 			return remain;
2048 		}
2049 	}
2050 
2051 	return remain;
2052 }
2053 
2054 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2055 {
2056 	int rem = 0;
2057 
2058 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2059 		return -EILSEQ;
2060 
2061 	while (count) {
2062 		rem = hci_reassembly(hdev, type, data, count, type - 1);
2063 		if (rem < 0)
2064 			return rem;
2065 
2066 		data += (count - rem);
2067 		count = rem;
2068 	}
2069 
2070 	return rem;
2071 }
2072 EXPORT_SYMBOL(hci_recv_fragment);
2073 
2074 #define STREAM_REASSEMBLY 0
2075 
2076 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2077 {
2078 	int type;
2079 	int rem = 0;
2080 
2081 	while (count) {
2082 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2083 
2084 		if (!skb) {
2085 			struct { char type; } *pkt;
2086 
2087 			/* Start of the frame */
2088 			pkt = data;
2089 			type = pkt->type;
2090 
2091 			data++;
2092 			count--;
2093 		} else
2094 			type = bt_cb(skb)->pkt_type;
2095 
2096 		rem = hci_reassembly(hdev, type, data, count,
2097 				     STREAM_REASSEMBLY);
2098 		if (rem < 0)
2099 			return rem;
2100 
2101 		data += (count - rem);
2102 		count = rem;
2103 	}
2104 
2105 	return rem;
2106 }
2107 EXPORT_SYMBOL(hci_recv_stream_fragment);
2108 
2109 /* ---- Interface to upper protocols ---- */
2110 
2111 int hci_register_cb(struct hci_cb *cb)
2112 {
2113 	BT_DBG("%p name %s", cb, cb->name);
2114 
2115 	write_lock(&hci_cb_list_lock);
2116 	list_add(&cb->list, &hci_cb_list);
2117 	write_unlock(&hci_cb_list_lock);
2118 
2119 	return 0;
2120 }
2121 EXPORT_SYMBOL(hci_register_cb);
2122 
2123 int hci_unregister_cb(struct hci_cb *cb)
2124 {
2125 	BT_DBG("%p name %s", cb, cb->name);
2126 
2127 	write_lock(&hci_cb_list_lock);
2128 	list_del(&cb->list);
2129 	write_unlock(&hci_cb_list_lock);
2130 
2131 	return 0;
2132 }
2133 EXPORT_SYMBOL(hci_unregister_cb);
2134 
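/* Hand one frame to the driver: timestamp it, mirror it to the monitor
 * and to promiscuous sockets, then pass it to hdev->send(). */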
2135 static int hci_send_frame(struct sk_buff *skb)
2136 {
2137 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2138 
2139 	if (!hdev) {
2140 		kfree_skb(skb);
2141 		return -ENODEV;
2142 	}
2143 
2144 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2145 
2146 	/* Time stamp */
2147 	__net_timestamp(skb);
2148 
2149 	/* Send copy to monitor */
2150 	hci_send_to_monitor(hdev, skb);
2151 
2152 	if (atomic_read(&hdev->promisc)) {
2153 		/* Send copy to the sockets */
2154 		hci_send_to_sock(hdev, skb);
2155 	}
2156 
2157 	/* Get rid of skb owner, prior to sending to the driver. */
2158 	skb_orphan(skb);
2159 
2160 	return hdev->send(skb);
2161 }
2162 
2163 /* Send HCI command */
2164 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2165 {
2166 	int len = HCI_COMMAND_HDR_SIZE + plen;
2167 	struct hci_command_hdr *hdr;
2168 	struct sk_buff *skb;
2169 
2170 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2171 
2172 	skb = bt_skb_alloc(len, GFP_ATOMIC);
2173 	if (!skb) {
2174 		BT_ERR("%s no memory for command", hdev->name);
2175 		return -ENOMEM;
2176 	}
2177 
2178 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2179 	hdr->opcode = cpu_to_le16(opcode);
2180 	hdr->plen   = plen;
2181 
2182 	if (plen)
2183 		memcpy(skb_put(skb, plen), param, plen);
2184 
2185 	BT_DBG("skb len %d", skb->len);
2186 
2187 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2188 	skb->dev = (void *) hdev;
2189 
2190 	if (test_bit(HCI_INIT, &hdev->flags))
2191 		hdev->init_last_cmd = opcode;
2192 
2193 	skb_queue_tail(&hdev->cmd_q, skb);
2194 	queue_work(hdev->workqueue, &hdev->cmd_work);
2195 
2196 	return 0;
2197 }
2198 
2199 /* Get data from the previously sent command */
2200 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2201 {
2202 	struct hci_command_hdr *hdr;
2203 
2204 	if (!hdev->sent_cmd)
2205 		return NULL;
2206 
2207 	hdr = (void *) hdev->sent_cmd->data;
2208 
2209 	if (hdr->opcode != cpu_to_le16(opcode))
2210 		return NULL;
2211 
2212 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2213 
2214 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2215 }
2216 
2217 /* Send ACL data */
2218 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2219 {
2220 	struct hci_acl_hdr *hdr;
2221 	int len = skb->len;
2222 
2223 	skb_push(skb, HCI_ACL_HDR_SIZE);
2224 	skb_reset_transport_header(skb);
2225 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2226 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2227 	hdr->dlen   = cpu_to_le16(len);
2228 }
2229 
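/* Build and queue ACL frames for a channel. A non-fragmented skb is
 * queued as-is; for a fragmented skb the frag_list is unchained and
 * every fragment gets its own ACL header, with the head keeping the
 * caller-supplied flags and all continuations re-marked as ACL_CONT. */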
2230 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2231 			  struct sk_buff *skb, __u16 flags)
2232 {
2233 	struct hci_conn *conn = chan->conn;
2234 	struct hci_dev *hdev = conn->hdev;
2235 	struct sk_buff *list;
2236 
2237 	skb->len = skb_headlen(skb);
2238 	skb->data_len = 0;
2239 
2240 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2241 
2242 	switch (hdev->dev_type) {
2243 	case HCI_BREDR:
2244 		hci_add_acl_hdr(skb, conn->handle, flags);
2245 		break;
2246 	case HCI_AMP:
2247 		hci_add_acl_hdr(skb, chan->handle, flags);
2248 		break;
2249 	default:
2250 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2251 		return;
2252 	}
2253 
2254 	list = skb_shinfo(skb)->frag_list;
2255 	if (!list) {
2256 		/* Non fragmented */
2257 		/* Non-fragmented */
2258 
2259 		skb_queue_tail(queue, skb);
2260 	} else {
2261 		/* Fragmented */
2262 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2263 
2264 		skb_shinfo(skb)->frag_list = NULL;
2265 
2266 		/* Queue all fragments atomically */
2267 		spin_lock(&queue->lock);
2268 
2269 		__skb_queue_tail(queue, skb);
2270 
2271 		flags &= ~ACL_START;
2272 		flags |= ACL_CONT;
2273 		do {
2274 			skb = list; list = list->next;
2275 
2276 			skb->dev = (void *) hdev;
2277 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2278 			hci_add_acl_hdr(skb, conn->handle, flags);
2279 
2280 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2281 
2282 			__skb_queue_tail(queue, skb);
2283 		} while (list);
2284 
2285 		spin_unlock(&queue->lock);
2286 	}
2287 }
2288 
2289 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2290 {
2291 	struct hci_dev *hdev = chan->conn->hdev;
2292 
2293 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2294 
2295 	skb->dev = (void *) hdev;
2296 
2297 	hci_queue_acl(chan, &chan->data_q, skb, flags);
2298 
2299 	queue_work(hdev->workqueue, &hdev->tx_work);
2300 }
2301 
2302 /* Send SCO data */
2303 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2304 {
2305 	struct hci_dev *hdev = conn->hdev;
2306 	struct hci_sco_hdr hdr;
2307 
2308 	BT_DBG("%s len %d", hdev->name, skb->len);
2309 
2310 	hdr.handle = cpu_to_le16(conn->handle);
2311 	hdr.dlen   = skb->len;
2312 
2313 	skb_push(skb, HCI_SCO_HDR_SIZE);
2314 	skb_reset_transport_header(skb);
2315 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2316 
2317 	skb->dev = (void *) hdev;
2318 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2319 
2320 	skb_queue_tail(&conn->data_q, skb);
2321 	queue_work(hdev->workqueue, &hdev->tx_work);
2322 }
2323 
2324 /* ---- HCI TX task (outgoing data) ---- */
2325 
2326 /* HCI Connection scheduler */
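/* Pick the connection of the given link type with the fewest
 * outstanding packets and grant it a quota of the controller's free
 * buffers divided by the number of eligible connections (at least 1). */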
2327 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2328 				     int *quote)
2329 {
2330 	struct hci_conn_hash *h = &hdev->conn_hash;
2331 	struct hci_conn *conn = NULL, *c;
2332 	unsigned int num = 0, min = ~0;
2333 
2334 	/* We don't have to lock device here. Connections are always
2335 	 * added and removed with TX task disabled. */
2336 
2337 	rcu_read_lock();
2338 
2339 	list_for_each_entry_rcu(c, &h->list, list) {
2340 		if (c->type != type || skb_queue_empty(&c->data_q))
2341 			continue;
2342 
2343 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2344 			continue;
2345 
2346 		num++;
2347 
2348 		if (c->sent < min) {
2349 			min  = c->sent;
2350 			conn = c;
2351 		}
2352 
2353 		if (hci_conn_num(hdev, type) == num)
2354 			break;
2355 	}
2356 
2357 	rcu_read_unlock();
2358 
2359 	if (conn) {
2360 		int cnt, q;
2361 
2362 		switch (conn->type) {
2363 		case ACL_LINK:
2364 			cnt = hdev->acl_cnt;
2365 			break;
2366 		case SCO_LINK:
2367 		case ESCO_LINK:
2368 			cnt = hdev->sco_cnt;
2369 			break;
2370 		case LE_LINK:
2371 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2372 			break;
2373 		default:
2374 			cnt = 0;
2375 			BT_ERR("Unknown link type");
2376 		}
2377 
2378 		q = cnt / num;
2379 		*quote = q ? q : 1;
2380 	} else
2381 		*quote = 0;
2382 
2383 	BT_DBG("conn %p quote %d", conn, *quote);
2384 	return conn;
2385 }
2386 
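/* Link TX timeout: disconnect every connection of the given type that
 * still has unacknowledged packets, so a stalled link cannot hold on
 * to controller buffers forever. */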
2387 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2388 {
2389 	struct hci_conn_hash *h = &hdev->conn_hash;
2390 	struct hci_conn *c;
2391 
2392 	BT_ERR("%s link tx timeout", hdev->name);
2393 
2394 	rcu_read_lock();
2395 
2396 	/* Kill stalled connections */
2397 	list_for_each_entry_rcu(c, &h->list, list) {
2398 		if (c->type == type && c->sent) {
2399 			BT_ERR("%s killing stalled connection %pMR",
2400 			       hdev->name, &c->dst);
2401 			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2402 		}
2403 	}
2404 
2405 	rcu_read_unlock();
2406 }
2407 
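/* Per-channel scheduler: among all channels of the given link type
 * with queued data, only those whose head skb has the highest pending
 * priority are considered; the channel on the least-used connection
 * wins and gets a quota derived from the free controller buffers. */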
2408 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2409 				      int *quote)
2410 {
2411 	struct hci_conn_hash *h = &hdev->conn_hash;
2412 	struct hci_chan *chan = NULL;
2413 	unsigned int num = 0, min = ~0, cur_prio = 0;
2414 	struct hci_conn *conn;
2415 	int cnt, q, conn_num = 0;
2416 
2417 	BT_DBG("%s", hdev->name);
2418 
2419 	rcu_read_lock();
2420 
2421 	list_for_each_entry_rcu(conn, &h->list, list) {
2422 		struct hci_chan *tmp;
2423 
2424 		if (conn->type != type)
2425 			continue;
2426 
2427 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2428 			continue;
2429 
2430 		conn_num++;
2431 
2432 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2433 			struct sk_buff *skb;
2434 
2435 			if (skb_queue_empty(&tmp->data_q))
2436 				continue;
2437 
2438 			skb = skb_peek(&tmp->data_q);
2439 			if (skb->priority < cur_prio)
2440 				continue;
2441 
2442 			if (skb->priority > cur_prio) {
2443 				num = 0;
2444 				min = ~0;
2445 				cur_prio = skb->priority;
2446 			}
2447 
2448 			num++;
2449 
2450 			if (conn->sent < min) {
2451 				min  = conn->sent;
2452 				chan = tmp;
2453 			}
2454 		}
2455 
2456 		if (hci_conn_num(hdev, type) == conn_num)
2457 			break;
2458 	}
2459 
2460 	rcu_read_unlock();
2461 
2462 	if (!chan)
2463 		return NULL;
2464 
2465 	switch (chan->conn->type) {
2466 	case ACL_LINK:
2467 		cnt = hdev->acl_cnt;
2468 		break;
2469 	case AMP_LINK:
2470 		cnt = hdev->block_cnt;
2471 		break;
2472 	case SCO_LINK:
2473 	case ESCO_LINK:
2474 		cnt = hdev->sco_cnt;
2475 		break;
2476 	case LE_LINK:
2477 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2478 		break;
2479 	default:
2480 		cnt = 0;
2481 		BT_ERR("Unknown link type");
2482 	}
2483 
2484 	q = cnt / num;
2485 	*quote = q ? q : 1;
2486 	BT_DBG("chan %p quote %d", chan, *quote);
2487 	return chan;
2488 }
2489 
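/* After a scheduling round, bump the head skb of every channel that
 * did not get to send anything to HCI_PRIO_MAX - 1 so that low
 * priority traffic is not starved indefinitely. */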
2490 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2491 {
2492 	struct hci_conn_hash *h = &hdev->conn_hash;
2493 	struct hci_conn *conn;
2494 	int num = 0;
2495 
2496 	BT_DBG("%s", hdev->name);
2497 
2498 	rcu_read_lock();
2499 
2500 	list_for_each_entry_rcu(conn, &h->list, list) {
2501 		struct hci_chan *chan;
2502 
2503 		if (conn->type != type)
2504 			continue;
2505 
2506 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2507 			continue;
2508 
2509 		num++;
2510 
2511 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2512 			struct sk_buff *skb;
2513 
2514 			if (chan->sent) {
2515 				chan->sent = 0;
2516 				continue;
2517 			}
2518 
2519 			if (skb_queue_empty(&chan->data_q))
2520 				continue;
2521 
2522 			skb = skb_peek(&chan->data_q);
2523 			if (skb->priority >= HCI_PRIO_MAX - 1)
2524 				continue;
2525 
2526 			skb->priority = HCI_PRIO_MAX - 1;
2527 
2528 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2529 			       skb->priority);
2530 		}
2531 
2532 		if (hci_conn_num(hdev, type) == num)
2533 			break;
2534 	}
2535 
2536 	rcu_read_unlock();
2537 
2538 }
2539 
2540 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2541 {
2542 	/* Calculate count of blocks used by this packet */
2543 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2544 }
2545 
2546 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2547 {
2548 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2549 		/* ACL tx timeout must be longer than maximum
2550 		 * link supervision timeout (40.9 seconds) */
2551 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2552 				       HCI_ACL_TX_TIMEOUT))
2553 			hci_link_tx_to(hdev, ACL_LINK);
2554 	}
2555 }
2556 
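/* ACL scheduling for packet based flow control: after checking for a
 * TX timeout, send from the highest priority channels while ACL
 * buffer credits (acl_cnt) remain. */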
2557 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2558 {
2559 	unsigned int cnt = hdev->acl_cnt;
2560 	struct hci_chan *chan;
2561 	struct sk_buff *skb;
2562 	int quote;
2563 
2564 	__check_timeout(hdev, cnt);
2565 
2566 	while (hdev->acl_cnt &&
2567 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2568 		u32 priority = (skb_peek(&chan->data_q))->priority;
2569 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2570 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2571 			       skb->len, skb->priority);
2572 
2573 			/* Stop if priority has changed */
2574 			if (skb->priority < priority)
2575 				break;
2576 
2577 			skb = skb_dequeue(&chan->data_q);
2578 
2579 			hci_conn_enter_active_mode(chan->conn,
2580 						   bt_cb(skb)->force_active);
2581 
2582 			hci_send_frame(skb);
2583 			hdev->acl_last_tx = jiffies;
2584 
2585 			hdev->acl_cnt--;
2586 			chan->sent++;
2587 			chan->conn->sent++;
2588 		}
2589 	}
2590 
2591 	if (cnt != hdev->acl_cnt)
2592 		hci_prio_recalculate(hdev, ACL_LINK);
2593 }
2594 
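/* ACL scheduling for block based flow control: like the packet based
 * variant, but the budget and quotas are accounted in data blocks
 * (block_cnt), and AMP controllers schedule AMP_LINK connections. */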
2595 static void hci_sched_acl_blk(struct hci_dev *hdev)
2596 {
2597 	unsigned int cnt = hdev->block_cnt;
2598 	struct hci_chan *chan;
2599 	struct sk_buff *skb;
2600 	int quote;
2601 	u8 type;
2602 
2603 	__check_timeout(hdev, cnt);
2604 
2605 	BT_DBG("%s", hdev->name);
2606 
2607 	if (hdev->dev_type == HCI_AMP)
2608 		type = AMP_LINK;
2609 	else
2610 		type = ACL_LINK;
2611 
2612 	while (hdev->block_cnt > 0 &&
2613 	       (chan = hci_chan_sent(hdev, type, &quote))) {
2614 		u32 priority = (skb_peek(&chan->data_q))->priority;
2615 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2616 			int blocks;
2617 
2618 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2619 			       skb->len, skb->priority);
2620 
2621 			/* Stop if priority has changed */
2622 			if (skb->priority < priority)
2623 				break;
2624 
2625 			skb = skb_dequeue(&chan->data_q);
2626 
2627 			blocks = __get_blocks(hdev, skb);
2628 			if (blocks > hdev->block_cnt)
2629 				return;
2630 
2631 			hci_conn_enter_active_mode(chan->conn,
2632 						   bt_cb(skb)->force_active);
2633 
2634 			hci_send_frame(skb);
2635 			hdev->acl_last_tx = jiffies;
2636 
2637 			hdev->block_cnt -= blocks;
2638 			quote -= blocks;
2639 
2640 			chan->sent += blocks;
2641 			chan->conn->sent += blocks;
2642 		}
2643 	}
2644 
2645 	if (cnt != hdev->block_cnt)
2646 		hci_prio_recalculate(hdev, type);
2647 }
2648 
2649 static void hci_sched_acl(struct hci_dev *hdev)
2650 {
2651 	BT_DBG("%s", hdev->name);
2652 
2653 	/* No ACL links on a BR/EDR controller, nothing to schedule */
2654 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2655 		return;
2656 
2657 	/* No AMP links on an AMP controller, nothing to schedule */
2658 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2659 		return;
2660 
2661 	switch (hdev->flow_ctl_mode) {
2662 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
2663 		hci_sched_acl_pkt(hdev);
2664 		break;
2665 
2666 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2667 		hci_sched_acl_blk(hdev);
2668 		break;
2669 	}
2670 }
2671 
2672 /* Schedule SCO */
2673 static void hci_sched_sco(struct hci_dev *hdev)
2674 {
2675 	struct hci_conn *conn;
2676 	struct sk_buff *skb;
2677 	int quote;
2678 
2679 	BT_DBG("%s", hdev->name);
2680 
2681 	if (!hci_conn_num(hdev, SCO_LINK))
2682 		return;
2683 
2684 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2685 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2686 			BT_DBG("skb %p len %d", skb, skb->len);
2687 			hci_send_frame(skb);
2688 
2689 			conn->sent++;
2690 			if (conn->sent == ~0)
2691 				conn->sent = 0;
2692 		}
2693 	}
2694 }
2695 
2696 static void hci_sched_esco(struct hci_dev *hdev)
2697 {
2698 	struct hci_conn *conn;
2699 	struct sk_buff *skb;
2700 	int quote;
2701 
2702 	BT_DBG("%s", hdev->name);
2703 
2704 	if (!hci_conn_num(hdev, ESCO_LINK))
2705 		return;
2706 
2707 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2708 						     &quote))) {
2709 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2710 			BT_DBG("skb %p len %d", skb, skb->len);
2711 			hci_send_frame(skb);
2712 
2713 			conn->sent++;
2714 			if (conn->sent == ~0)
2715 				conn->sent = 0;
2716 		}
2717 	}
2718 }
2719 
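/* LE scheduling: use the dedicated LE buffer count when the
 * controller reports one (le_pkts), otherwise share the ACL buffer
 * pool, and check for a stalled LE link before sending. */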
2720 static void hci_sched_le(struct hci_dev *hdev)
2721 {
2722 	struct hci_chan *chan;
2723 	struct sk_buff *skb;
2724 	int quote, cnt, tmp;
2725 
2726 	BT_DBG("%s", hdev->name);
2727 
2728 	if (!hci_conn_num(hdev, LE_LINK))
2729 		return;
2730 
2731 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2732 		/* LE tx timeout must be longer than maximum
2733 		 * link supervision timeout (40.9 seconds) */
2734 		if (!hdev->le_cnt && hdev->le_pkts &&
2735 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
2736 			hci_link_tx_to(hdev, LE_LINK);
2737 	}
2738 
2739 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2740 	tmp = cnt;
2741 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2742 		u32 priority = (skb_peek(&chan->data_q))->priority;
2743 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2744 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2745 			       skb->len, skb->priority);
2746 
2747 			/* Stop if priority has changed */
2748 			if (skb->priority < priority)
2749 				break;
2750 
2751 			skb = skb_dequeue(&chan->data_q);
2752 
2753 			hci_send_frame(skb);
2754 			hdev->le_last_tx = jiffies;
2755 
2756 			cnt--;
2757 			chan->sent++;
2758 			chan->conn->sent++;
2759 		}
2760 	}
2761 
2762 	if (hdev->le_pkts)
2763 		hdev->le_cnt = cnt;
2764 	else
2765 		hdev->acl_cnt = cnt;
2766 
2767 	if (cnt != tmp)
2768 		hci_prio_recalculate(hdev, LE_LINK);
2769 }
2770 
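/* TX work: runs from hdev->workqueue and services the ACL, SCO, eSCO
 * and LE schedulers in turn, then sends whatever is left on the raw
 * packet queue. */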
2771 static void hci_tx_work(struct work_struct *work)
2772 {
2773 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2774 	struct sk_buff *skb;
2775 
2776 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2777 	       hdev->sco_cnt, hdev->le_cnt);
2778 
2779 	/* Schedule queues and send stuff to HCI driver */
2780 
2781 	hci_sched_acl(hdev);
2782 
2783 	hci_sched_sco(hdev);
2784 
2785 	hci_sched_esco(hdev);
2786 
2787 	hci_sched_le(hdev);
2788 
2789 	/* Send next queued raw (unknown type) packet */
2790 	while ((skb = skb_dequeue(&hdev->raw_q)))
2791 		hci_send_frame(skb);
2792 }
2793 
2794 /* ----- HCI RX task (incoming data processing) ----- */
2795 
2796 /* ACL data packet */
2797 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2798 {
2799 	struct hci_acl_hdr *hdr = (void *) skb->data;
2800 	struct hci_conn *conn;
2801 	__u16 handle, flags;
2802 
2803 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2804 
2805 	handle = __le16_to_cpu(hdr->handle);
2806 	flags  = hci_flags(handle);
2807 	handle = hci_handle(handle);
2808 
2809 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2810 	       handle, flags);
2811 
2812 	hdev->stat.acl_rx++;
2813 
2814 	hci_dev_lock(hdev);
2815 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2816 	hci_dev_unlock(hdev);
2817 
2818 	if (conn) {
2819 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2820 
2821 		/* Send to upper protocol */
2822 		l2cap_recv_acldata(conn, skb, flags);
2823 		return;
2824 	} else {
2825 		BT_ERR("%s ACL packet for unknown connection handle %d",
2826 		       hdev->name, handle);
2827 	}
2828 
2829 	kfree_skb(skb);
2830 }
2831 
2832 /* SCO data packet */
2833 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2834 {
2835 	struct hci_sco_hdr *hdr = (void *) skb->data;
2836 	struct hci_conn *conn;
2837 	__u16 handle;
2838 
2839 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2840 
2841 	handle = __le16_to_cpu(hdr->handle);
2842 
2843 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2844 
2845 	hdev->stat.sco_rx++;
2846 
2847 	hci_dev_lock(hdev);
2848 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2849 	hci_dev_unlock(hdev);
2850 
2851 	if (conn) {
2852 		/* Send to upper protocol */
2853 		sco_recv_scodata(conn, skb);
2854 		return;
2855 	} else {
2856 		BT_ERR("%s SCO packet for unknown connection handle %d",
2857 		       hdev->name, handle);
2858 	}
2859 
2860 	kfree_skb(skb);
2861 }
2862 
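/* RX work: every received frame is mirrored to the monitor channel
 * (and to raw sockets in promiscuous mode); frames are dropped while
 * the device is in raw mode, data packets are dropped during init,
 * and everything else is dispatched by packet type. */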
2863 static void hci_rx_work(struct work_struct *work)
2864 {
2865 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2866 	struct sk_buff *skb;
2867 
2868 	BT_DBG("%s", hdev->name);
2869 
2870 	while ((skb = skb_dequeue(&hdev->rx_q))) {
2871 		/* Send copy to monitor */
2872 		hci_send_to_monitor(hdev, skb);
2873 
2874 		if (atomic_read(&hdev->promisc)) {
2875 			/* Send copy to the sockets */
2876 			hci_send_to_sock(hdev, skb);
2877 		}
2878 
2879 		if (test_bit(HCI_RAW, &hdev->flags)) {
2880 			kfree_skb(skb);
2881 			continue;
2882 		}
2883 
2884 		if (test_bit(HCI_INIT, &hdev->flags)) {
2885 			/* Don't process data packets in this state. */
2886 			switch (bt_cb(skb)->pkt_type) {
2887 			case HCI_ACLDATA_PKT:
2888 			case HCI_SCODATA_PKT:
2889 				kfree_skb(skb);
2890 				continue;
2891 			}
2892 		}
2893 
2894 		/* Process frame */
2895 		switch (bt_cb(skb)->pkt_type) {
2896 		case HCI_EVENT_PKT:
2897 			BT_DBG("%s Event packet", hdev->name);
2898 			hci_event_packet(hdev, skb);
2899 			break;
2900 
2901 		case HCI_ACLDATA_PKT:
2902 			BT_DBG("%s ACL data packet", hdev->name);
2903 			hci_acldata_packet(hdev, skb);
2904 			break;
2905 
2906 		case HCI_SCODATA_PKT:
2907 			BT_DBG("%s SCO data packet", hdev->name);
2908 			hci_scodata_packet(hdev, skb);
2909 			break;
2910 
2911 		default:
2912 			kfree_skb(skb);
2913 			break;
2914 		}
2915 	}
2916 }
2917 
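/* Command work: send the next queued command only while the
 * controller has command credits (cmd_cnt), keep a clone in
 * hdev->sent_cmd for later inspection and (re)arm the command
 * timeout unless a reset is in progress. */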
2918 static void hci_cmd_work(struct work_struct *work)
2919 {
2920 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2921 	struct sk_buff *skb;
2922 
2923 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2924 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2925 
2926 	/* Send queued commands */
2927 	if (atomic_read(&hdev->cmd_cnt)) {
2928 		skb = skb_dequeue(&hdev->cmd_q);
2929 		if (!skb)
2930 			return;
2931 
2932 		kfree_skb(hdev->sent_cmd);
2933 
2934 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2935 		if (hdev->sent_cmd) {
2936 			atomic_dec(&hdev->cmd_cnt);
2937 			hci_send_frame(skb);
2938 			if (test_bit(HCI_RESET, &hdev->flags))
2939 				del_timer(&hdev->cmd_timer);
2940 			else
2941 				mod_timer(&hdev->cmd_timer,
2942 					  jiffies + HCI_CMD_TIMEOUT);
2943 		} else {
2944 			skb_queue_head(&hdev->cmd_q, skb);
2945 			queue_work(hdev->workqueue, &hdev->cmd_work);
2946 		}
2947 	}
2948 }
2949 
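/* Start a general inquiry (GIAC) for the given length unless one is
 * already in progress; the inquiry cache is flushed first. */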
2950 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2951 {
2952 	/* General inquiry access code (GIAC) */
2953 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2954 	struct hci_cp_inquiry cp;
2955 
2956 	BT_DBG("%s", hdev->name);
2957 
2958 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2959 		return -EINPROGRESS;
2960 
2961 	inquiry_cache_flush(hdev);
2962 
2963 	memset(&cp, 0, sizeof(cp));
2964 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2965 	cp.length  = length;
2966 
2967 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2968 }
2969 
2970 int hci_cancel_inquiry(struct hci_dev *hdev)
2971 {
2972 	BT_DBG("%s", hdev->name);
2973 
2974 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2975 		return -EALREADY;
2976 
2977 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2978 }
2979 
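/* Map an exported BDADDR_LE_* address type to the internal
 * ADDR_LE_DEV_* value, defaulting to the random address type. */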
2980 u8 bdaddr_to_le(u8 bdaddr_type)
2981 {
2982 	switch (bdaddr_type) {
2983 	case BDADDR_LE_PUBLIC:
2984 		return ADDR_LE_DEV_PUBLIC;
2985 
2986 	default:
2987 		/* Fallback to LE Random address type */
2988 		return ADDR_LE_DEV_RANDOM;
2989 	}
2990 }
2991